Merge
author jwilhelm
date Fri, 13 Apr 2018 03:05:19 +0200
changeset 49681 4beba2c2a329
parent 49589 4d3f0fea5469 (current diff)
parent 49680 2e681d678ec8 (diff)
child 49682 2918e1146106
child 49761 46dc568d6804
child 56422 b09629f4b243
Merge
make/hotspot/lib/CompileJvm.gmk
make/lib/Awt2dLibraries.gmk
make/mapfiles/libjsig/mapfile-vers-solaris
src/hotspot/share/gc/g1/concurrentMarkThread.cpp
src/hotspot/share/gc/g1/concurrentMarkThread.hpp
src/hotspot/share/gc/g1/concurrentMarkThread.inline.hpp
src/hotspot/share/gc/g1/g1CardLiveData.cpp
src/hotspot/share/gc/g1/g1CardLiveData.hpp
src/hotspot/share/gc/g1/g1CardLiveData.inline.hpp
test/jdk/ProblemList.txt
--- a/make/autoconf/flags-cflags.m4	Fri Apr 13 09:06:37 2018 +0800
+++ b/make/autoconf/flags-cflags.m4	Fri Apr 13 03:05:19 2018 +0200
@@ -453,6 +453,7 @@
   elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     ALWAYS_DEFINES_JDK="-DWIN32_LEAN_AND_MEAN -D_CRT_SECURE_NO_DEPRECATE \
         -D_CRT_NONSTDC_NO_DEPRECATE -DWIN32 -DIAL"
+    ALWAYS_DEFINES_JVM="-DNOMINMAX"
   fi
 
   ###############################################################################
--- a/make/autoconf/libraries.m4	Fri Apr 13 09:06:37 2018 +0800
+++ b/make/autoconf/libraries.m4	Fri Apr 13 03:05:19 2018 +0200
@@ -114,17 +114,7 @@
   fi
 
   # Math library
-  if test "x$OPENJDK_TARGET_OS" != xsolaris; then
-    BASIC_JVM_LIBS="$LIBM"
-  else
-    # FIXME: This hard-coded path is not really proper.
-    if test "x$OPENJDK_TARGET_CPU" = xx86_64; then
-      BASIC_SOLARIS_LIBM_LIBS="/usr/lib/amd64/libm.so.1"
-    elif test "x$OPENJDK_TARGET_CPU" = xsparcv9; then
-      BASIC_SOLARIS_LIBM_LIBS="/usr/lib/sparcv9/libm.so.1"
-    fi
-    BASIC_JVM_LIBS="$BASIC_SOLARIS_LIBM_LIBS"
-  fi
+  BASIC_JVM_LIBS="$LIBM"
 
   # Dynamic loading library
   if test "x$OPENJDK_TARGET_OS" = xlinux || test "x$OPENJDK_TARGET_OS" = xsolaris || test "x$OPENJDK_TARGET_OS" = xaix; then
--- a/make/autoconf/platform.m4	Fri Apr 13 09:06:37 2018 +0800
+++ b/make/autoconf/platform.m4	Fri Apr 13 03:05:19 2018 +0200
@@ -60,6 +60,12 @@
       VAR_CPU_BITS=64
       VAR_CPU_ENDIAN=little
       ;;
+    ia64)
+      VAR_CPU=ia64
+      VAR_CPU_ARCH=ia64
+      VAR_CPU_BITS=64
+      VAR_CPU_ENDIAN=little
+      ;;
     m68k)
       VAR_CPU=m68k
       VAR_CPU_ARCH=m68k
--- a/make/hotspot/lib/CompileJvm.gmk	Fri Apr 13 09:06:37 2018 +0800
+++ b/make/hotspot/lib/CompileJvm.gmk	Fri Apr 13 03:05:19 2018 +0200
@@ -113,6 +113,11 @@
   else ifeq ($(OPENJDK_TARGET_CPU), sparcv9)
     JVM_CFLAGS += $(TOPDIR)/src/hotspot/os_cpu/solaris_sparc/solaris_sparc.il
   endif
+  # Exclude warnings in devstudio 12.6
+  ifeq ($(CC_VERSION_NUMBER), 5.15)
+    DISABLED_WARNINGS_solstudio := SEC_ARR_OUTSIDE_BOUND_READ \
+      SEC_ARR_OUTSIDE_BOUND_WRITE
+  endif
 endif
 
 ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU), solaris-sparcv9)
@@ -154,6 +159,7 @@
     vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
     arguments.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
     DISABLED_WARNINGS_clang := tautological-compare, \
+    DISABLED_WARNINGS_solstudio := $(DISABLED_WARNINGS_solstudio), \
     DISABLED_WARNINGS_xlc := 1540-0216 1540-0198 1540-1090 1540-1639 \
         1540-1088 1500-010, \
     ASFLAGS := $(JVM_ASFLAGS), \
--- a/make/lib/Awt2dLibraries.gmk	Fri Apr 13 09:06:37 2018 +0800
+++ b/make/lib/Awt2dLibraries.gmk	Fri Apr 13 03:05:19 2018 +0200
@@ -403,11 +403,7 @@
     LDFLAGS := $(LDFLAGS_JDKLIB) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
     LDFLAGS_unix := -L$(INSTALL_LIBRARIES_HERE), \
-    LDFLAGS_solaris := /usr/lib$(OPENJDK_TARGET_CPU_ISADIR)/libm.so.2, \
-    LIBS_unix := -lawt -ljvm -ljava $(LCMS_LIBS), \
-    LIBS_linux := $(LIBM), \
-    LIBS_macosx := $(LIBM), \
-    LIBS_aix := $(LIBM),\
+    LIBS_unix := -lawt -ljvm -ljava $(LCMS_LIBS) $(LIBM), \
     LIBS_windows := $(WIN_AWT_LIB) $(WIN_JAVA_LIB), \
 ))
 
--- a/make/test/JtregNativeHotspot.gmk	Fri Apr 13 09:06:37 2018 +0800
+++ b/make/test/JtregNativeHotspot.gmk	Fri Apr 13 03:05:19 2018 +0200
@@ -65,8 +65,11 @@
       exeinvoke.c exestack-gap.c
 endif
 
+BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exesigtest := -ljvm
+
 ifeq ($(OPENJDK_TARGET_OS), windows)
     BUILD_HOTSPOT_JTREG_EXECUTABLES_CFLAGS_exeFPRegs := -MT
+    BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c
 endif
 
 $(eval $(call SetupTestFilesCompilation, BUILD_HOTSPOT_JTREG_LIBRARIES, \
--- a/src/bsd/doc/man/java.1	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/bsd/doc/man/java.1	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 '\" t
-.\" Copyright (c) 1994, 2015, Oracle and/or its affiliates. All rights reserved.
+.\" Copyright (c) 1994, 2018, Oracle and/or its affiliates. All rights reserved.
 .\" DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 .\"
 .\" This code is free software; you can redistribute it and/or modify it
@@ -1178,65 +1178,6 @@
 .PP
 These options control the runtime behavior of the Java HotSpot VM\&.
 .PP
-\-XX:+CheckEndorsedAndExtDirs
-.RS 4
-Enables the option to prevent the
-\fBjava\fR
-command from running a Java application if it uses the endorsed\-standards override mechanism or the extension mechanism\&. This option checks if an application is using one of these mechanisms by checking the following:
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBjava\&.ext\&.dirs\fR
-or
-\fBjava\&.endorsed\&.dirs\fR
-system property is set\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/endorsed\fR
-directory exists and is not empty\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/ext\fR
-directory contains any JAR files other than those of the JDK\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The system\-wide platform\-specific extension directory contains any JAR files\&.
-.RE
-.RE
-.PP
 \-XX:+DisableAttachMechanism
 .RS 4
 Enables the option that disables the mechanism that lets tools attach to the JVM\&. By default, this option is disabled, meaning that the attach mechanism is enabled and you can use tools such as
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Fri Apr 13 03:05:19 2018 +0200
@@ -995,8 +995,10 @@
 
 source_hpp %{
 
+#include "asm/macroAssembler.hpp"
 #include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/collectedHeap.hpp"
 #include "opto/addnode.hpp"
 
 class CallStubImpl {
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -35,8 +35,9 @@
 #include "compiler/disassembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_aarch64.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "opto/compile.hpp"
 #include "opto/intrinsicnode.hpp"
 #include "opto/node.hpp"
@@ -46,7 +47,6 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
-
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CardTable.hpp"
@@ -173,7 +173,7 @@
   // instruction.
   if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
     // Move narrow OOP
-    narrowOop n = oopDesc::encode_heap_oop((oop)o);
+    narrowOop n = CompressedOops::encode((oop)o);
     Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
     Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
     instructions = 2;
@@ -3712,7 +3712,7 @@
   }
 }
 
-// Algorithm must match oop.inline.hpp encode_heap_oop.
+// Algorithm must match CompressedOops::encode.
 void MacroAssembler::encode_heap_oop(Register d, Register s) {
 #ifdef ASSERT
   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,6 @@
 #define CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
 
 #include "asm/assembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -31,7 +31,7 @@
 class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
 protected:
   void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
-                                       Register addr, Register count, , int callee_saved_regs);
+                                       Register addr, Register count, int callee_saved_regs);
   void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                         Register addr, Register count, Register tmp);
 };
--- a/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -32,7 +32,7 @@
 class BarrierSetAssembler: public CHeapObj<mtGC> {
 public:
   virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
-                                  Register addr, Register count, , int callee_saved_regs) {}
+                                  Register addr, Register count, int callee_saved_regs) {}
   virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                   Register addr, Register count, Register tmp) {}
 };
--- a/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -44,6 +44,7 @@
 void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count, Register tmp) {
   BLOCK_COMMENT("CardTablePostBarrier");
+  BarrierSet* bs = Universe::heap()->barrier_set();
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
   assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
--- a/src/hotspot/cpu/arm/gc/shared/modRefBarrierSetAssembler_arm.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/arm/gc/shared/modRefBarrierSetAssembler_arm.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -31,7 +31,7 @@
 class ModRefBarrierSetAssembler: public BarrierSetAssembler {
 protected:
   virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
-                                               Register addr, Register count, , int callee_saved_regs) {}
+                                               Register addr, Register count, int callee_saved_regs) {}
   virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                 Register addr, Register count, Register tmp) {}
 
--- a/src/hotspot/cpu/arm/interpreterRT_arm.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/arm/interpreterRT_arm.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -37,7 +37,7 @@
 
 #define __ _masm->
 
-Interpreter::SignatureHandlerGenerator::SignatureHandlerGenerator(
+InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(
     const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
   _masm = new MacroAssembler(buffer);
   _abi_offset = 0;
--- a/src/hotspot/cpu/arm/nativeInst_arm.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/arm/nativeInst_arm.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #define CPU_ARM_VM_NATIVEINST_ARM_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,6 @@
 
 #include "asm/macroAssembler.hpp"
 #include "code/codeCache.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
--- a/src/hotspot/cpu/arm/nativeInst_arm_64.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/arm/nativeInst_arm_64.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,8 +27,9 @@
 #include "code/codeCache.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_arm.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -105,7 +106,7 @@
     uintptr_t nx = 0;
     int val_size = 32;
     if (oop_addr != NULL) {
-      narrowOop encoded_oop = oopDesc::encode_heap_oop(*oop_addr);
+      narrowOop encoded_oop = CompressedOops::encode(*oop_addr);
       nx = encoded_oop;
     } else if (metadata_addr != NULL) {
       assert((*metadata_addr)->is_klass(), "expected Klass");
@@ -240,4 +241,3 @@
   assert(NativeCall::is_call_before(return_address), "must be");
   return nativeCall_at(call_for(return_address));
 }
-
--- a/src/hotspot/cpu/arm/nativeInst_arm_64.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/arm/nativeInst_arm_64.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,6 @@
 
 #include "asm/macroAssembler.hpp"
 #include "code/codeCache.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/arm/relocInfo_arm.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/arm/relocInfo_arm.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,8 @@
 #include "assembler_arm.inline.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_arm.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -40,7 +41,7 @@
       uintptr_t d = ni->data();
       guarantee((d >> 32) == 0, "not narrow oop");
       narrowOop no = d;
-      oop o = oopDesc::decode_heap_oop(no);
+      oop o = CompressedOops::decode(no);
       guarantee(cast_from_oop<intptr_t>(o) == (intptr_t)x, "instructions must match");
     } else {
       ni->set_data((intptr_t)x);
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -2877,7 +2877,7 @@
     // 'to' is the beginning of the region
 
     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->arraycopy_epilogue(this, decorators, true, to, count, tmp);
+    bs->arraycopy_epilogue(_masm, decorators, true, to, count, tmp);
 
     if (status) {
       __ mov(R0, 0); // OK
@@ -2954,7 +2954,7 @@
     }
 
     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->arraycopy_prologue(this, decorators, true, to, count, callee_saved_regs);
+    bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
 
     // save arguments for barrier generation (after the pre barrier)
     __ mov(saved_count, count);
@@ -3220,7 +3220,7 @@
     DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
 
     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->arraycopy_prologue(this, decorators, true, to, count, callee_saved_regs);
+    bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
 
 #ifndef AARCH64
     const RegisterSet caller_saved_regs = RegisterSet(R4,R6) | RegisterSet(R8,R9) | altFP_7_11;
@@ -3298,7 +3298,7 @@
     __ sub(to, to, AsmOperand(copied, lsl, LogBytesPerHeapOop)); // initial to value
     __ mov(R12, copied); // count arg scratched by post barrier
 
-    bs->arraycopy_epilogue(this, decorators, true, to, R12, R3);
+    bs->arraycopy_epilogue(_masm, decorators, true, to, R12, R3);
 
     assert_different_registers(R3,R12,LR,copied,saved_count);
     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R3, R12);
--- a/src/hotspot/cpu/ppc/frame_ppc.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/ppc/frame_ppc.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -384,7 +384,7 @@
 
   // Constructors
   inline frame(intptr_t* sp);
-  frame(intptr_t* sp, address pc);
+  inline frame(intptr_t* sp, address pc);
   inline frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
 
  private:
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,8 +27,10 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_ppc.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/handles.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/ostream.hpp"
@@ -194,7 +196,7 @@
   CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
   if (MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) {
     narrowOop no = (narrowOop)MacroAssembler::get_narrow_oop(addr, cb->content_begin());
-    return cast_from_oop<intptr_t>(oopDesc::decode_heap_oop(no));
+    return cast_from_oop<intptr_t>(CompressedOops::decode(no));
   } else {
     assert(MacroAssembler::is_load_const_from_method_toc_at(addr), "must be load_const_from_pool");
 
@@ -415,4 +417,3 @@
 
   *(address*)(ctable + destination_toc_offset()) = new_destination;
 }
-
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,6 @@
 #define CPU_PPC_VM_NATIVEINST_PPC_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointMechanism.hpp"
--- a/src/hotspot/cpu/ppc/relocInfo_ppc.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/ppc/relocInfo_ppc.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,8 +27,9 @@
 #include "asm/assembler.inline.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_ppc.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -57,7 +58,7 @@
       assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type,
              "how to encode else?");
       narrowOop no = (type() == relocInfo::oop_type) ?
-        oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
+          CompressedOops::encode((oop)x) : Klass::encode_klass((Klass*)x);
       nativeMovConstReg_at(addr())->set_narrow_oop(no, code());
     }
   } else {
--- a/src/hotspot/cpu/s390/frame_s390.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/s390/frame_s390.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -465,10 +465,10 @@
  // Constructors
 
  public:
-  frame(intptr_t* sp);
+  inline frame(intptr_t* sp);
   // To be used, if sp was not extended to match callee's calling convention.
-  frame(intptr_t* sp, address pc);
-  frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
+  inline frame(intptr_t* sp, address pc);
+  inline frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
 
   // Access frame via stack pointer.
   inline intptr_t* sp_addr_at(int index) const  { return &sp()[index]; }
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -33,6 +33,7 @@
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "opto/compile.hpp"
 #include "opto/intrinsicnode.hpp"
@@ -1286,7 +1287,7 @@
 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
   assert(UseCompressedOops, "Can only patch compressed oops");
 
-  narrowOop no = oopDesc::encode_heap_oop(o);
+  narrowOop no = CompressedOops::encode(o);
   return patch_load_const_32to64(pos, no);
 }
 
@@ -1304,7 +1305,7 @@
 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
   assert(UseCompressedOops, "Can only patch compressed oops");
 
-  narrowOop no = oopDesc::encode_heap_oop(o);
+  narrowOop no = CompressedOops::encode(o);
   return patch_compare_immediate_32(pos, no);
 }
 
--- a/src/hotspot/cpu/s390/nativeInst_s390.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/s390/nativeInst_s390.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,7 +29,6 @@
 #define CPU_S390_VM_NATIVEINST_S390_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -41,17 +41,6 @@
 REGISTER_DECLARATION(FloatRegister, Ftos_d1, F0); // for 1st part of double
 REGISTER_DECLARATION(FloatRegister, Ftos_d2, F1); // for 2nd part of double
 
-#ifndef DONT_USE_REGISTER_DEFINES
-#define Otos_i  O0
-#define Otos_l  O0
-#define Otos_l1 O0
-#define Otos_l2 O1
-#define Ftos_f  F0
-#define Ftos_d  F0
-#define Ftos_d1 F0
-#define Ftos_d2 F1
-#endif // DONT_USE_REGISTER_DEFINES
-
 class InterpreterMacroAssembler: public MacroAssembler {
  protected:
   // Interpreter specific version of call_VM_base
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -998,8 +998,13 @@
 
 
 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
-  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
-  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
+#ifdef ASSERT
+  {
+    ThreadInVMfromUnknown tiv;
+    assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
+    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
+  }
+#endif
   int oop_index = oop_recorder()->find_index(obj);
   return AddressLiteral(obj, oop_Relocation::spec(oop_index));
 }
@@ -3703,7 +3708,7 @@
 // Called from init_globals() after universe_init() and before interpreter_init()
 void g1_barrier_stubs_init() {
   CollectedHeap* heap = Universe::heap();
-  if (heap->kind() == CollectedHeap::G1CollectedHeap) {
+  if (heap->kind() == CollectedHeap::G1) {
     // Only needed for G1
     if (dirty_card_log_enqueue == 0) {
       G1BarrierSet* bs =
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -199,41 +199,6 @@
 REGISTER_DECLARATION(Register, Oexception  , O0); // exception being thrown
 REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from
 
-
-// These must occur after the declarations above
-#ifndef DONT_USE_REGISTER_DEFINES
-
-#define Gthread             AS_REGISTER(Register, Gthread)
-#define Gmethod             AS_REGISTER(Register, Gmethod)
-#define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
-#define Ginline_cache_reg   AS_REGISTER(Register, Ginline_cache_reg)
-#define Gargs               AS_REGISTER(Register, Gargs)
-#define Lthread_cache       AS_REGISTER(Register, Lthread_cache)
-#define Gframe_size         AS_REGISTER(Register, Gframe_size)
-#define Gtemp               AS_REGISTER(Register, Gtemp)
-
-#define Lesp                AS_REGISTER(Register, Lesp)
-#define Lbcp                AS_REGISTER(Register, Lbcp)
-#define Lmethod             AS_REGISTER(Register, Lmethod)
-#define Llocals             AS_REGISTER(Register, Llocals)
-#define Lmonitors           AS_REGISTER(Register, Lmonitors)
-#define Lbyte_code          AS_REGISTER(Register, Lbyte_code)
-#define Lscratch            AS_REGISTER(Register, Lscratch)
-#define Lscratch2           AS_REGISTER(Register, Lscratch2)
-#define LcpoolCache         AS_REGISTER(Register, LcpoolCache)
-
-#define Lentry_args         AS_REGISTER(Register, Lentry_args)
-#define I5_savedSP          AS_REGISTER(Register, I5_savedSP)
-#define O5_savedSP          AS_REGISTER(Register, O5_savedSP)
-#define IdispatchAddress    AS_REGISTER(Register, IdispatchAddress)
-#define ImethodDataPtr      AS_REGISTER(Register, ImethodDataPtr)
-
-#define Oexception          AS_REGISTER(Register, Oexception)
-#define Oissuing_pc         AS_REGISTER(Register, Oissuing_pc)
-
-#endif
-
-
 // Address is an abstraction used to represent a memory location.
 //
 // Note: A register location is represented via a Register, not
--- a/src/hotspot/cpu/sparc/nativeInst_sparc.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/sparc/nativeInst_sparc.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,7 +26,6 @@
 #define CPU_SPARC_VM_NATIVEINST_SPARC_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/sparc/register_definitions_sparc.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/sparc/register_definitions_sparc.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -22,9 +22,6 @@
  *
  */
 
-// make sure the defines don't screw up the declarations later on in this file
-#define DONT_USE_REGISTER_DEFINES
-
 // Note: precompiled headers can not be used in this file because of the above
 //       definition
 
--- a/src/hotspot/cpu/sparc/register_sparc.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/sparc/register_sparc.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -154,62 +154,6 @@
 CONSTANT_REGISTER_DECLARATION(Register, FP    , (RegisterImpl::ibase + 6));
 CONSTANT_REGISTER_DECLARATION(Register, SP    , (RegisterImpl::obase + 6));
 
-//
-// Because sparc has so many registers, #define'ing values for the is
-// beneficial in code size and the cost of some of the dangers of
-// defines.  We don't use them on Intel because win32 uses asm
-// directives which use the same names for registers as Hotspot does,
-// so #defines would screw up the inline assembly.  If a particular
-// file has a problem with these defines then it's possible to turn
-// them off in that file by defining DONT_USE_REGISTER_DEFINES.
-// register_definition_sparc.cpp does that so that it's able to
-// provide real definitions of these registers for use in debuggers
-// and such.
-//
-
-#ifndef DONT_USE_REGISTER_DEFINES
-#define noreg ((Register)(noreg_RegisterEnumValue))
-
-#define G0 ((Register)(G0_RegisterEnumValue))
-#define G1 ((Register)(G1_RegisterEnumValue))
-#define G2 ((Register)(G2_RegisterEnumValue))
-#define G3 ((Register)(G3_RegisterEnumValue))
-#define G4 ((Register)(G4_RegisterEnumValue))
-#define G5 ((Register)(G5_RegisterEnumValue))
-#define G6 ((Register)(G6_RegisterEnumValue))
-#define G7 ((Register)(G7_RegisterEnumValue))
-
-#define O0 ((Register)(O0_RegisterEnumValue))
-#define O1 ((Register)(O1_RegisterEnumValue))
-#define O2 ((Register)(O2_RegisterEnumValue))
-#define O3 ((Register)(O3_RegisterEnumValue))
-#define O4 ((Register)(O4_RegisterEnumValue))
-#define O5 ((Register)(O5_RegisterEnumValue))
-#define O6 ((Register)(O6_RegisterEnumValue))
-#define O7 ((Register)(O7_RegisterEnumValue))
-
-#define L0 ((Register)(L0_RegisterEnumValue))
-#define L1 ((Register)(L1_RegisterEnumValue))
-#define L2 ((Register)(L2_RegisterEnumValue))
-#define L3 ((Register)(L3_RegisterEnumValue))
-#define L4 ((Register)(L4_RegisterEnumValue))
-#define L5 ((Register)(L5_RegisterEnumValue))
-#define L6 ((Register)(L6_RegisterEnumValue))
-#define L7 ((Register)(L7_RegisterEnumValue))
-
-#define I0 ((Register)(I0_RegisterEnumValue))
-#define I1 ((Register)(I1_RegisterEnumValue))
-#define I2 ((Register)(I2_RegisterEnumValue))
-#define I3 ((Register)(I3_RegisterEnumValue))
-#define I4 ((Register)(I4_RegisterEnumValue))
-#define I5 ((Register)(I5_RegisterEnumValue))
-#define I6 ((Register)(I6_RegisterEnumValue))
-#define I7 ((Register)(I7_RegisterEnumValue))
-
-#define FP ((Register)(FP_RegisterEnumValue))
-#define SP ((Register)(SP_RegisterEnumValue))
-#endif // DONT_USE_REGISTER_DEFINES
-
 // Use FloatRegister as shortcut
 class FloatRegisterImpl;
 typedef FloatRegisterImpl* FloatRegister;
@@ -321,59 +265,6 @@
 CONSTANT_REGISTER_DECLARATION(FloatRegister, F60    , (60));
 CONSTANT_REGISTER_DECLARATION(FloatRegister, F62    , (62));
 
-
-#ifndef DONT_USE_REGISTER_DEFINES
-#define fnoreg ((FloatRegister)(fnoreg_FloatRegisterEnumValue))
-#define F0     ((FloatRegister)(    F0_FloatRegisterEnumValue))
-#define F1     ((FloatRegister)(    F1_FloatRegisterEnumValue))
-#define F2     ((FloatRegister)(    F2_FloatRegisterEnumValue))
-#define F3     ((FloatRegister)(    F3_FloatRegisterEnumValue))
-#define F4     ((FloatRegister)(    F4_FloatRegisterEnumValue))
-#define F5     ((FloatRegister)(    F5_FloatRegisterEnumValue))
-#define F6     ((FloatRegister)(    F6_FloatRegisterEnumValue))
-#define F7     ((FloatRegister)(    F7_FloatRegisterEnumValue))
-#define F8     ((FloatRegister)(    F8_FloatRegisterEnumValue))
-#define F9     ((FloatRegister)(    F9_FloatRegisterEnumValue))
-#define F10    ((FloatRegister)(   F10_FloatRegisterEnumValue))
-#define F11    ((FloatRegister)(   F11_FloatRegisterEnumValue))
-#define F12    ((FloatRegister)(   F12_FloatRegisterEnumValue))
-#define F13    ((FloatRegister)(   F13_FloatRegisterEnumValue))
-#define F14    ((FloatRegister)(   F14_FloatRegisterEnumValue))
-#define F15    ((FloatRegister)(   F15_FloatRegisterEnumValue))
-#define F16    ((FloatRegister)(   F16_FloatRegisterEnumValue))
-#define F17    ((FloatRegister)(   F17_FloatRegisterEnumValue))
-#define F18    ((FloatRegister)(   F18_FloatRegisterEnumValue))
-#define F19    ((FloatRegister)(   F19_FloatRegisterEnumValue))
-#define F20    ((FloatRegister)(   F20_FloatRegisterEnumValue))
-#define F21    ((FloatRegister)(   F21_FloatRegisterEnumValue))
-#define F22    ((FloatRegister)(   F22_FloatRegisterEnumValue))
-#define F23    ((FloatRegister)(   F23_FloatRegisterEnumValue))
-#define F24    ((FloatRegister)(   F24_FloatRegisterEnumValue))
-#define F25    ((FloatRegister)(   F25_FloatRegisterEnumValue))
-#define F26    ((FloatRegister)(   F26_FloatRegisterEnumValue))
-#define F27    ((FloatRegister)(   F27_FloatRegisterEnumValue))
-#define F28    ((FloatRegister)(   F28_FloatRegisterEnumValue))
-#define F29    ((FloatRegister)(   F29_FloatRegisterEnumValue))
-#define F30    ((FloatRegister)(   F30_FloatRegisterEnumValue))
-#define F31    ((FloatRegister)(   F31_FloatRegisterEnumValue))
-#define F32    ((FloatRegister)(   F32_FloatRegisterEnumValue))
-#define F34    ((FloatRegister)(   F34_FloatRegisterEnumValue))
-#define F36    ((FloatRegister)(   F36_FloatRegisterEnumValue))
-#define F38    ((FloatRegister)(   F38_FloatRegisterEnumValue))
-#define F40    ((FloatRegister)(   F40_FloatRegisterEnumValue))
-#define F42    ((FloatRegister)(   F42_FloatRegisterEnumValue))
-#define F44    ((FloatRegister)(   F44_FloatRegisterEnumValue))
-#define F46    ((FloatRegister)(   F46_FloatRegisterEnumValue))
-#define F48    ((FloatRegister)(   F48_FloatRegisterEnumValue))
-#define F50    ((FloatRegister)(   F50_FloatRegisterEnumValue))
-#define F52    ((FloatRegister)(   F52_FloatRegisterEnumValue))
-#define F54    ((FloatRegister)(   F54_FloatRegisterEnumValue))
-#define F56    ((FloatRegister)(   F56_FloatRegisterEnumValue))
-#define F58    ((FloatRegister)(   F58_FloatRegisterEnumValue))
-#define F60    ((FloatRegister)(   F60_FloatRegisterEnumValue))
-#define F62    ((FloatRegister)(   F62_FloatRegisterEnumValue))
-#endif // DONT_USE_REGISTER_DEFINES
-
 // Maximum number of incoming arguments that can be passed in i registers.
 const int SPARC_ARGS_IN_REGS_NUM = 6;
 
--- a/src/hotspot/cpu/sparc/relocInfo_sparc.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/sparc/relocInfo_sparc.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,8 +26,9 @@
 #include "asm/assembler.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_sparc.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -97,7 +98,7 @@
     guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
     if (format() != 0) {
       assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case");
-      jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
+      jint np = type() == relocInfo::oop_type ? CompressedOops::encode((oop)x) : Klass::encode_klass((Klass*)x);
       inst &= ~Assembler::hi22(-1);
       inst |=  Assembler::hi22((intptr_t)np);
       if (verify_only) {
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -4080,6 +4080,16 @@
   emit_operand(dst, src);
   emit_int8(mode & 0xFF);
 }
+void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
+  assert(VM_Version::supports_evex(), "requires EVEX support");
+  assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, "");
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  emit_int8(0x43);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8 & 0xFF);
+}
 
 void Assembler::psrldq(XMMRegister dst, int shift) {
   // Shift left 128 bit value in dst XMMRegister by shift number of bytes.
@@ -6201,6 +6211,27 @@
   emit_operand(dst, src);
 }
 
+void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+  assert(VM_Version::supports_evex(), "requires EVEX support");
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+  emit_int8((unsigned char)0xEF);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+  assert(VM_Version::supports_evex(), "requires EVEX support");
+  assert(dst != xnoreg, "sanity");
+  InstructionMark im(this);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
+  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+  emit_int8((unsigned char)0xEF);
+  emit_operand(dst, src);
+}
+
 
 // vinserti forms
 
@@ -6786,6 +6817,16 @@
   emit_int8((unsigned char)mask);
 }
 
+void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
+  assert(VM_Version::supports_vpclmulqdq(), "Requires vector carryless multiplication support");
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  emit_int8(0x44);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8((unsigned char)mask);
+}
+
 void Assembler::vzeroupper() {
   if (VM_Version::supports_vzeroupper()) {
     InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
--- a/src/hotspot/cpu/x86/assembler_x86.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1663,6 +1663,9 @@
   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
   void pshuflw(XMMRegister dst, Address src,     int mode);
 
+  // Shuffle packed values at 128 bit granularity
+  void evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len);
+
   // Shift Right by bytes Logical DoubleQuadword Immediate
   void psrldq(XMMRegister dst, int shift);
   // Shift Left by bytes Logical DoubleQuadword Immediate
@@ -2046,6 +2049,9 @@
   void pxor(XMMRegister dst, XMMRegister src);
   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+  void evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+  void evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+
 
   // vinserti forms
   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
@@ -2108,7 +2114,7 @@
   // Carry-Less Multiplication Quadword
   void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
-
+  void evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len);
   // AVX instruction which is used to clear upper 128 bits of YMM registers and
   // to avoid transaction penalty between AVX and SSE states. There is no
   // penalty if legacy SSE instructions are encoded using VEX prefix because
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -10120,6 +10120,16 @@
 }
 
 /**
+* Fold four 128-bit data chunks
+*/
+void MacroAssembler::fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
+  evpclmulhdq(xtmp, xK, xcrc, Assembler::AVX_512bit); // [123:64]
+  evpclmulldq(xcrc, xK, xcrc, Assembler::AVX_512bit); // [63:0]
+  evpxorq(xcrc, xcrc, Address(buf, offset), Assembler::AVX_512bit /* vector_len */);
+  evpxorq(xcrc, xcrc, xtmp, Assembler::AVX_512bit /* vector_len */);
+}
+
+/**
  * Fold 128-bit data chunk
  */
 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
@@ -10224,6 +10234,34 @@
   shrl(len, 4);
   jcc(Assembler::zero, L_tail_restore);
 
+  // Fold total 512 bits of polynomial on each iteration
+  if (VM_Version::supports_vpclmulqdq()) {
+    Label Parallel_loop, L_No_Parallel;
+
+    cmpl(len, 8);
+    jccb(Assembler::less, L_No_Parallel);
+
+    movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
+    evmovdquq(xmm1, Address(buf, 0), Assembler::AVX_512bit);
+    movdl(xmm5, crc);
+    evpxorq(xmm1, xmm1, xmm5, Assembler::AVX_512bit);
+    addptr(buf, 64);
+    subl(len, 7);
+    evshufi64x2(xmm0, xmm0, xmm0, 0x00, Assembler::AVX_512bit); //propagate the mask from 128 bits to 512 bits
+
+    BIND(Parallel_loop);
+    fold_128bit_crc32_avx512(xmm1, xmm0, xmm5, buf, 0);
+    addptr(buf, 64);
+    subl(len, 4);
+    jcc(Assembler::greater, Parallel_loop);
+
+    vextracti64x2(xmm2, xmm1, 0x01);
+    vextracti64x2(xmm3, xmm1, 0x02);
+    vextracti64x2(xmm4, xmm1, 0x03);
+    jmp(L_fold_512b);
+
+    BIND(L_No_Parallel);
+  }
   // Fold crc into first bytes of vector
   movdqa(xmm1, Address(buf, 0));
   movdl(rax, xmm1);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1498,6 +1498,14 @@
     // 0x11 - multiply upper 64 bits [64:127]
     Assembler::vpclmulqdq(dst, nds, src, 0x11);
   }
+  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+    // 0x00 - multiply lower 64 bits [0:63]
+    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
+  }
+  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+    // 0x11 - multiply upper 64 bits [64:127]
+    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
+  }
 
   // Data
 
@@ -1723,6 +1731,7 @@
   // Fold 8-bit data
   void fold_8bit_crc32(Register crc, Register table, Register tmp);
   void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
+  void fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
 
   // Compress char[] array to byte[].
   void char_array_compress(Register src, Register dst, Register len,
--- a/src/hotspot/cpu/x86/nativeInst_x86.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,7 +26,6 @@
 #define CPU_X86_VM_NATIVEINST_X86_HPP
 
 #include "asm/assembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointMechanism.hpp"
--- a/src/hotspot/cpu/x86/relocInfo_x86.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/x86/relocInfo_x86.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,6 +26,7 @@
 #include "asm/macroAssembler.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_x86.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/safepoint.hpp"
@@ -51,9 +52,9 @@
     // both compressed oops and compressed classes look the same
     if (Universe::heap()->is_in_reserved((oop)x)) {
     if (verify_only) {
-      guarantee(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
+      guarantee(*(uint32_t*) disp == CompressedOops::encode((oop)x), "instructions must match");
     } else {
-      *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
+      *(int32_t*) disp = CompressedOops::encode((oop)x);
     }
   } else {
       if (verify_only) {
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,6 +27,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "code/debugInfoRec.hpp"
 #include "code/icBuffer.hpp"
+#include "code/nativeInst.hpp"
 #include "code/vtableStubs.hpp"
 #include "gc/shared/gcLocker.hpp"
 #include "interpreter/interpreter.hpp"
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -41,6 +41,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/align.hpp"
+#include "utilities/formatBuffer.hpp"
 #include "vm_version_x86.hpp"
 #include "vmreg_x86.inline.hpp"
 #ifdef COMPILER1
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -665,6 +665,7 @@
     _features &= ~CPU_AVX512BW;
     _features &= ~CPU_AVX512VL;
     _features &= ~CPU_AVX512_VPOPCNTDQ;
+    _features &= ~CPU_VPCLMULQDQ;
   }
 
   if (UseAVX < 2)
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -334,6 +334,7 @@
 #define CPU_FMA ((uint64_t)UCONST64(0x800000000))      // FMA instructions
 #define CPU_VZEROUPPER ((uint64_t)UCONST64(0x1000000000))       // Vzeroupper instruction
 #define CPU_AVX512_VPOPCNTDQ ((uint64_t)UCONST64(0x2000000000)) // Vector popcount
+#define CPU_VPCLMULQDQ ((uint64_t)UCONST64(0x4000000000)) //Vector carryless multiplication
 
   enum Extended_Family {
     // AMD
@@ -542,6 +543,8 @@
           result |= CPU_AVX512VL;
         if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
           result |= CPU_AVX512_VPOPCNTDQ;
+        if (_cpuid_info.sef_cpuid7_ecx.bits.vpclmulqdq != 0)
+          result |= CPU_VPCLMULQDQ;
       }
     }
     if(_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
@@ -819,6 +822,7 @@
   static bool supports_fma()        { return (_features & CPU_FMA) != 0 && supports_avx(); }
   static bool supports_vzeroupper() { return (_features & CPU_VZEROUPPER) != 0; }
   static bool supports_vpopcntdq()  { return (_features & CPU_AVX512_VPOPCNTDQ) != 0; }
+  static bool supports_vpclmulqdq() { return (_features & CPU_VPCLMULQDQ) != 0; }
 
   // Intel features
   static bool is_intel_family_core() { return is_intel() &&
--- a/src/hotspot/cpu/zero/nativeInst_zero.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/cpu/zero/nativeInst_zero.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,6 @@
 #define CPU_ZERO_VM_NATIVEINST_ZERO_HPP
 
 #include "asm/assembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/os/aix/attachListener_aix.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os/aix/attachListener_aix.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
--- a/src/hotspot/os/bsd/attachListener_bsd.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os/bsd/attachListener_bsd.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
--- a/src/hotspot/os/linux/attachListener_linux.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os/linux/attachListener_linux.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.inline.hpp"
--- a/src/hotspot/os/linux/os_linux.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os/linux/os_linux.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -152,6 +152,13 @@
 
 static int clock_tics_per_sec = 100;
 
+// If the VM might have been created on the primordial thread, we need to resolve the
+// primordial thread stack bounds and check if the current thread might be the
+// primordial thread in places. If we know that the primordial thread is never used,
+// such as when the VM was created by one of the standard java launchers, we can
+// avoid this
+static bool suppress_primordial_thread_resolution = false;
+
 // For diagnostics to print a message once. see run_periodic_checks
 static sigset_t check_signal_done;
 static bool check_signals = true;
@@ -917,6 +924,9 @@
 
 // Check if current thread is the primordial thread, similar to Solaris thr_main.
 bool os::is_primordial_thread(void) {
+  if (suppress_primordial_thread_resolution) {
+    return false;
+  }
   char dummy;
   // If called before init complete, thread stack bottom will be null.
   // Can be called if fatal error occurs before initialization.
@@ -1644,10 +1654,7 @@
         //
         // Dynamic loader will make all stacks executable after
         // this function returns, and will not do that again.
-#ifdef ASSERT
-        ThreadsListHandle tlh;
-        assert(tlh.length() == 0, "no Java threads should exist yet.");
-#endif
+        assert(Threads::number_of_threads() == 0, "no Java threads should exist yet.");
       } else {
         warning("You have loaded library %s which might have disabled stack guard. "
                 "The VM will try to fix the stack guard now.\n"
@@ -4936,7 +4943,11 @@
   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
     return JNI_ERR;
   }
-  Linux::capture_initial_stack(JavaThread::stack_size_at_create());
+
+  suppress_primordial_thread_resolution = Arguments::created_by_java_launcher();
+  if (!suppress_primordial_thread_resolution) {
+    Linux::capture_initial_stack(JavaThread::stack_size_at_create());
+  }
 
 #if defined(IA32)
   workaround_expand_exec_shield_cs_limit();
--- a/src/hotspot/os/posix/os_posix.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os/posix/os_posix.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "jvm.h"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "runtime/frame.inline.hpp"
@@ -30,6 +31,7 @@
 #include "runtime/os.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
+#include "utilities/formatBuffer.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
--- a/src/hotspot/os/posix/vmError_posix.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os/posix/vmError_posix.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,6 +27,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/vmError.hpp"
 
 #include <sys/types.h>
@@ -122,11 +123,20 @@
     pc = (address) info->si_addr;
   }
 
+  // Needed to make it possible to call SafeFetch.. APIs in error handling.
   if (uc && pc && StubRoutines::is_safefetch_fault(pc)) {
     os::Posix::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
     return;
   }
 
+  // Needed because asserts may happen in error handling too.
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return;
+  }
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
   VMError::report_and_die(NULL, sig, pc, info, ucVoid);
 }
 
--- a/src/hotspot/os/solaris/attachListener_solaris.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os/solaris/attachListener_solaris.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
--- a/src/hotspot/os_cpu/aix_ppc/thread_aix_ppc.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os_cpu/aix_ppc/thread_aix_ppc.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -24,7 +24,7 @@
  */
 
 #include "precompiled.hpp"
-#include "runtime/frame.hpp"
+#include "runtime/frame.inline.hpp"
 #include "runtime/thread.hpp"
 
 frame JavaThread::pd_last_frame() {
--- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -32,6 +32,7 @@
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "os_share_bsd.hpp"
 #include "prims/jniFastGetField.hpp"
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -50,6 +50,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 #ifdef BUILTIN_SIM
@@ -306,6 +307,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/timer.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -311,6 +312,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -51,6 +51,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -266,6 +267,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -54,6 +54,7 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
 #include "utilities/events.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/vmError.hpp"
 
 // put OS-includes here
@@ -270,6 +271,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -513,6 +514,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -32,6 +32,7 @@
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "os_share_linux.hpp"
 #include "prims/jniFastGetField.hpp"
@@ -50,6 +51,7 @@
 #include "runtime/timer.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -303,6 +305,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -32,6 +32,7 @@
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "os_share_solaris.hpp"
 #include "prims/jniFastGetField.hpp"
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,8 +29,8 @@
 #include "classfile/javaAssertions.hpp"
 #include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/collectedHeap.hpp"
 #include "gc/g1/heapRegion.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/abstractInterpreter.hpp"
 #include "jvmci/compilerRuntime.hpp"
 #include "jvmci/jvmciRuntime.hpp"
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -32,7 +32,6 @@
 #include "compiler/compilerOracle.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "jvmci/compilerRuntime.hpp"
 #include "jvmci/jvmciRuntime.hpp"
 #include "oops/method.inline.hpp"
@@ -40,6 +39,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/xmlstream.hpp"
 
--- a/src/hotspot/share/asm/codeBuffer.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/asm/codeBuffer.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,10 +25,10 @@
 #include "precompiled.hpp"
 #include "asm/codeBuffer.hpp"
 #include "compiler/disassembler.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "oops/methodData.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/icache.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/xmlstream.hpp"
--- a/src/hotspot/share/c1/c1_FpuStackSim.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/c1/c1_FpuStackSim.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #define SHARE_VM_C1_C1_FPUSTACKSIM_HPP
 
 #include "c1/c1_FrameMap.hpp"
-#include "memory/allocation.hpp"
 #include "utilities/macros.hpp"
 
 // Provides location for forward declaration of this class, which is
--- a/src/hotspot/share/c1/c1_Optimizer.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/c1/c1_Optimizer.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,6 @@
 
 #include "c1/c1_IR.hpp"
 #include "c1/c1_Instruction.hpp"
-#include "memory/allocation.hpp"
 
 class Optimizer {
  private:
--- a/src/hotspot/share/ci/bcEscapeAnalyzer.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/ci/bcEscapeAnalyzer.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,8 +32,7 @@
 #include "oops/oop.inline.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
-
-
+#include "utilities/copy.hpp"
 
 #ifndef PRODUCT
   #define TRACE_BCEA(level, code)                                            \
--- a/src/hotspot/share/ci/ciEnv.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/ci/ciEnv.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -57,6 +57,7 @@
 #include "runtime/init.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/jniHandles.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.inline.hpp"
 #include "trace/tracing.hpp"
@@ -540,7 +541,7 @@
     // Calculate accessibility the hard way.
     if (!k->is_loaded()) {
       is_accessible = false;
-    } else if (k->loader() != accessor->loader() &&
+    } else if (!oopDesc::equals(k->loader(), accessor->loader()) &&
                get_klass_by_name_impl(accessor, cpool, k->name(), true) == NULL) {
       // Loaded only remotely.  Not linked yet.
       is_accessible = false;
@@ -591,7 +592,7 @@
     index = cpool->object_to_cp_index(cache_index);
     oop obj = cpool->resolved_references()->obj_at(cache_index);
     if (obj != NULL) {
-      if (obj == Universe::the_null_sentinel()) {
+      if (oopDesc::equals(obj, Universe::the_null_sentinel())) {
         return ciConstant(T_OBJECT, get_object(NULL));
       }
       BasicType bt = T_OBJECT;
--- a/src/hotspot/share/ci/ciFlags.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/ci/ciFlags.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,6 @@
 
 #include "jvm.h"
 #include "ci/ciClassList.hpp"
-#include "memory/allocation.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/ostream.hpp"
 
--- a/src/hotspot/share/ci/ciMetadata.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/ci/ciMetadata.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
 
 #include "ci/ciBaseObject.hpp"
 #include "ci/ciClassList.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/jniHandles.hpp"
 
--- a/src/hotspot/share/ci/ciObject.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/ci/ciObject.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,6 @@
 
 #include "ci/ciBaseObject.hpp"
 #include "ci/ciClassList.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/jniHandles.hpp"
 
--- a/src/hotspot/share/ci/ciObjectFactory.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/ci/ciObjectFactory.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -249,7 +249,7 @@
   // into the cache.
   Handle keyHandle(Thread::current(), key);
   ciObject* new_object = create_new_object(keyHandle());
-  assert(keyHandle() == new_object->get_oop(), "must be properly recorded");
+  assert(oopDesc::equals(keyHandle(), new_object->get_oop()), "must be properly recorded");
   init_ident_of(new_object);
   assert(Universe::heap()->is_in_reserved(new_object->get_oop()), "must be");
 
@@ -450,8 +450,8 @@
   for (int i=0; i<_unloaded_klasses->length(); i++) {
     ciKlass* entry = _unloaded_klasses->at(i);
     if (entry->name()->equals(name) &&
-        entry->loader() == loader &&
-        entry->protection_domain() == domain) {
+        oopDesc::equals(entry->loader(), loader) &&
+        oopDesc::equals(entry->protection_domain(), domain)) {
       // We've found a match.
       return entry;
     }
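Here and in the hunks that follow, raw oop comparisons with == are funneled through oopDesc::equals() so that a collector which needs an equality barrier on references (for example, one that must compare forwarded copies of objects) has a single place to hook in. A hedged sketch of the shape such a helper takes; the null and fast paths are ordinary pointer identity, while resolve() is an assumption standing in for a GC-specific barrier:

  // Sketch only; the real helper routes through HotSpot's Access API.
  static bool equals(oop o1, oop o2) {
    if (o1 == o2) return true;                   // identical references
    if (o1 == NULL || o2 == NULL) return false;  // exactly one side is null
    return resolve(o1) == resolve(o2);           // GC-specific canonical form
  }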
--- a/src/hotspot/share/classfile/classFileParser.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -37,7 +37,6 @@
 #include "classfile/verificationType.hpp"
 #include "classfile/verifier.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.hpp"
@@ -62,6 +61,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/reflection.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/timer.hpp"
 #include "services/classLoadingService.hpp"
@@ -69,6 +69,7 @@
 #include "trace/traceMacros.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
+#include "utilities/copy.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/growableArray.hpp"
@@ -5423,6 +5424,8 @@
   // has to be changed accordingly.
   ik->set_initial_method_idnum(ik->methods()->length());
 
+  ik->set_this_class_index(_this_class_index);
+
   if (is_anonymous()) {
     // _this_class_index is a CONSTANT_Class entry that refers to this
     // anonymous class itself. If this class needs to refer to its own methods or
--- a/src/hotspot/share/classfile/classLoader.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/classLoader.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -64,7 +64,7 @@
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/threadCritical.hpp"
 #include "runtime/timer.hpp"
 #include "runtime/vm_version.hpp"
@@ -148,8 +148,6 @@
 #if INCLUDE_CDS
 ClassPathEntry* ClassLoader::_app_classpath_entries = NULL;
 ClassPathEntry* ClassLoader::_last_app_classpath_entry = NULL;
-GrowableArray<char*>* ClassLoader::_boot_modules_array = NULL;
-GrowableArray<char*>* ClassLoader::_platform_modules_array = NULL;
 SharedPathsMiscInfo* ClassLoader::_shared_paths_misc_info = NULL;
 #endif
 
--- a/src/hotspot/share/classfile/classLoader.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/classLoader.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -233,12 +233,6 @@
   // Last entry in linked list of appended ClassPathEntry instances
   static ClassPathEntry* _last_append_entry;
 
-  // Array of module names associated with the boot class loader
-  CDS_ONLY(static GrowableArray<char*>* _boot_modules_array;)
-
-  // Array of module names associated with the platform class loader
-  CDS_ONLY(static GrowableArray<char*>* _platform_modules_array;)
-
   // Info used by CDS
   CDS_ONLY(static SharedPathsMiscInfo * _shared_paths_misc_info;)
 
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -56,7 +56,6 @@
 #include "classfile/packageEntry.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
@@ -74,6 +73,7 @@
 #include "runtime/mutex.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/safepoint.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/synchronizer.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
@@ -201,7 +201,7 @@
   VerifyContainsOopClosure(oop target) : _target(target), _found(false) {}
 
   void do_oop(oop* p) {
-    if (p != NULL && *p == _target) {
+    if (p != NULL && oopDesc::equals(RawAccess<>::oop_load(p), _target)) {
       _found = true;
     }
   }
@@ -380,7 +380,7 @@
 
     // Just return if this dependency is to a class with the same or a parent
     // class_loader.
-    if (from == to || java_lang_ClassLoader::isAncestor(from, to)) {
+    if (oopDesc::equals(from, to) || java_lang_ClassLoader::isAncestor(from, to)) {
       return; // this class loader is in the parent list, no need to add it.
     }
   }
@@ -1223,17 +1223,6 @@
   return array;
 }
 
-bool ClassLoaderDataGraph::unload_list_contains(const void* x) {
-  assert(SafepointSynchronize::is_at_safepoint(), "only safe to call at safepoint");
-  for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
-    // Needs fixing, see JDK-8199007.
-    if (cld->metaspace_or_null() != NULL && Metaspace::contains(x)) {
-      return true;
-    }
-  }
-  return false;
-}
-
 #ifndef PRODUCT
 bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
   for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -155,8 +155,6 @@
   static void print() { print_on(tty); }
   static void verify();
 
-  static bool unload_list_contains(const void* x);
-
   // instance and array class counters
   static inline size_t num_instance_classes();
   static inline size_t num_array_classes();
--- a/src/hotspot/share/classfile/compactHashtable.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/compactHashtable.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,6 +29,7 @@
 #include "logging/logMessage.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/numberSeq.hpp"
 #include <sys/stat.h>
@@ -182,7 +183,7 @@
 }
 
 void CompactStringTableWriter::add(unsigned int hash, oop string) {
-  CompactHashtableWriter::add(hash, oopDesc::encode_heap_oop(string));
+  CompactHashtableWriter::add(hash, CompressedOops::encode(string));
 }
 
 void CompactSymbolTableWriter::dump(CompactHashtable<Symbol*, char> *cht) {
--- a/src/hotspot/share/classfile/compactHashtable.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/compactHashtable.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,8 +26,10 @@
 #define SHARE_VM_CLASSFILE_COMPACTHASHTABLE_INLINE_HPP
 
 #include "classfile/compactHashtable.hpp"
+#include "classfile/javaClasses.hpp"
 #include "memory/allocation.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 
 template <class T, class N>
 inline Symbol* CompactHashtable<T, N>::decode_entry(CompactHashtable<Symbol*, char>* const t,
@@ -45,7 +47,7 @@
 inline oop CompactHashtable<T, N>::decode_entry(CompactHashtable<oop, char>* const t,
                                                 u4 offset, const char* name, int len) {
   narrowOop obj = (narrowOop)offset;
-  oop string = oopDesc::decode_heap_oop(obj);
+  oop string = CompressedOops::decode(obj);
   if (java_lang_String::equals(string, (jchar*)name, len)) {
     return string;
   }
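The compactHashtable hunks replace the deprecated oopDesc::encode_heap_oop()/decode_heap_oop() with the new CompressedOops interface. The underlying arithmetic is base-plus-shifted-offset compression; a sketch assuming a fixed heap base and shift (narrow_base and narrow_shift stand in for the values HotSpot derives from the heap layout, and the zero-based/unscaled modes are ignored here):

  #include <stdint.h>
  #include <stddef.h>

  typedef uint32_t narrowOop;          // 32-bit compressed reference
  typedef char*    oop;                // stand-in for HotSpot's oop type

  static char* narrow_base;            // heap base chosen at startup
  static int   narrow_shift;           // usually log2(object alignment)

  narrowOop encode(oop v) {
    if (v == NULL) return 0;           // null compresses to 0
    return (narrowOop)((uintptr_t)(v - narrow_base) >> narrow_shift);
  }

  oop decode(narrowOop v) {
    if (v == 0) return NULL;           // 0 decompresses to null
    return narrow_base + ((uintptr_t)v << narrow_shift);
  }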
--- a/src/hotspot/share/classfile/defaultMethods.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/defaultMethods.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -884,6 +884,10 @@
   if (new_methods->length() > 0) {
     ConstantPool* cp = bpool->create_constant_pool(CHECK);
     if (cp != klass->constants()) {
+      // Copy resolved anonymous class into new constant pool.
+      if (klass->is_anonymous()) {
+        cp->klass_at_put(klass->this_class_index(), klass);
+      }
       klass->class_loader_data()->add_to_deallocate_list(klass->constants());
       klass->set_constants(cp);
       cp->set_pool_holder(klass);
--- a/src/hotspot/share/classfile/dictionary.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/dictionary.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,7 +29,6 @@
 #include "classfile/protectionDomainCache.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/systemDictionaryShared.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/iterator.hpp"
@@ -38,6 +37,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/hashtable.inline.hpp"
 
 // Optimization: if any dictionary needs resizing, we set this flag,
@@ -161,13 +161,13 @@
 
 bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
 #ifdef ASSERT
-  if (protection_domain == instance_klass()->protection_domain()) {
+  if (oopDesc::equals(protection_domain, instance_klass()->protection_domain())) {
     // Ensure this doesn't show up in the pd_set (invariant)
     bool in_pd_set = false;
     for (ProtectionDomainEntry* current = pd_set_acquire();
                                 current != NULL;
                                 current = current->next()) {
-      if (current->object_no_keepalive() == protection_domain) {
+      if (oopDesc::equals(current->object_no_keepalive(), protection_domain)) {
         in_pd_set = true;
         break;
       }
@@ -179,7 +179,7 @@
   }
 #endif /* ASSERT */
 
-  if (protection_domain == instance_klass()->protection_domain()) {
+  if (oopDesc::equals(protection_domain, instance_klass()->protection_domain())) {
     // Succeeds trivially
     return true;
   }
@@ -187,7 +187,7 @@
   for (ProtectionDomainEntry* current = pd_set_acquire();
                               current != NULL;
                               current = current->next()) {
-    if (current->object_no_keepalive() == protection_domain) return true;
+    if (oopDesc::equals(current->object_no_keepalive(), protection_domain)) return true;
   }
   return false;
 }
--- a/src/hotspot/share/classfile/javaClasses.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -33,6 +33,7 @@
 #include "code/dependencyContext.hpp"
 #include "code/pcDesc.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/linkResolver.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/oopFactory.hpp"
@@ -57,6 +58,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/safepoint.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vframe.inline.hpp"
 #include "utilities/align.hpp"
@@ -870,7 +872,7 @@
   } else {
     assert(Universe::is_module_initialized() ||
            (ModuleEntryTable::javabase_defined() &&
-            (module() == ModuleEntryTable::javabase_moduleEntry()->module())),
+            (oopDesc::equals(module(), ModuleEntryTable::javabase_moduleEntry()->module()))),
            "Incorrect java.lang.Module specification while creating mirror");
     set_module(mirror(), module());
   }
@@ -947,7 +949,7 @@
     }
 
     // set the classLoader field in the java_lang_Class instance
-    assert(class_loader() == k->class_loader(), "should be same");
+    assert(oopDesc::equals(class_loader(), k->class_loader()), "should be same");
     set_class_loader(mirror(), class_loader());
 
     // Setup indirection from klass->mirror
@@ -1461,9 +1463,9 @@
     // Note: create_basic_type_mirror above initializes ak to a non-null value.
     type = ArrayKlass::cast(ak)->element_type();
   } else {
-    assert(java_class == Universe::void_mirror(), "only valid non-array primitive");
+    assert(oopDesc::equals(java_class, Universe::void_mirror()), "only valid non-array primitive");
   }
-  assert(Universe::java_mirror(type) == java_class, "must be consistent");
+  assert(oopDesc::equals(Universe::java_mirror(type), java_class), "must be consistent");
   return type;
 }
 
@@ -3504,7 +3506,7 @@
 // Support for java_lang_ref_Reference
 
 bool java_lang_ref_Reference::is_referent_field(oop obj, ptrdiff_t offset) {
-  assert(!oopDesc::is_null(obj), "sanity");
+  assert(obj != NULL, "sanity");
   if (offset != java_lang_ref_Reference::referent_offset) {
     return false;
   }
@@ -3836,14 +3838,14 @@
 }
 
 bool java_lang_invoke_MethodType::equals(oop mt1, oop mt2) {
-  if (mt1 == mt2)
+  if (oopDesc::equals(mt1, mt2))
     return true;
-  if (rtype(mt1) != rtype(mt2))
+  if (!oopDesc::equals(rtype(mt1), rtype(mt2)))
     return false;
   if (ptype_count(mt1) != ptype_count(mt2))
     return false;
   for (int i = ptype_count(mt1) - 1; i >= 0; i--) {
-    if (ptype(mt1, i) != ptype(mt2, i))
+    if (!oopDesc::equals(ptype(mt1, i), ptype(mt2, i)))
       return false;
   }
   return true;
@@ -4041,7 +4043,7 @@
   // This loop taken verbatim from ClassLoader.java:
   do {
     acl = parent(acl);
-    if (cl == acl) {
+    if (oopDesc::equals(cl, acl)) {
       return true;
     }
     assert(++loop_count > 0, "loop_count overflow");
@@ -4071,7 +4073,7 @@
 
   oop cl = SystemDictionary::java_system_loader();
   while(cl != NULL) {
-    if (cl == loader) return true;
+    if (oopDesc::equals(cl, loader)) return true;
     cl = parent(cl);
   }
   return false;
@@ -4131,7 +4133,7 @@
 bool java_lang_System::has_security_manager() {
   InstanceKlass* ik = SystemDictionary::System_klass();
   oop base = ik->static_field_base_raw();
-  return !oopDesc::is_null(base->obj_field(static_security_offset));
+  return base->obj_field(static_security_offset) != NULL;
 }
 
 int java_lang_Class::_klass_offset;
--- a/src/hotspot/share/classfile/protectionDomainCache.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/protectionDomainCache.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -132,7 +132,7 @@
 
 ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, Handle protection_domain) {
   for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
-    if (e->object_no_keepalive() == protection_domain()) {
+    if (oopDesc::equals(e->object_no_keepalive(), protection_domain())) {
       return e;
     }
   }
--- a/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/os.inline.hpp"
 #include "utilities/ostream.hpp"
 
 SharedPathsMiscInfo::SharedPathsMiscInfo() {
--- a/src/hotspot/share/classfile/stringTable.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/stringTable.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,7 +29,6 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
@@ -41,6 +40,7 @@
 #include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
--- a/src/hotspot/share/classfile/stringTable.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/stringTable.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_CLASSFILE_STRINGTABLE_HPP
 #define SHARE_VM_CLASSFILE_STRINGTABLE_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/hashtable.hpp"
 
 template <class T, class N> class CompactHashtable;
--- a/src/hotspot/share/classfile/symbolTable.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/symbolTable.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,7 +29,7 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "memory/metaspaceClosure.hpp"
@@ -37,6 +37,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "utilities/hashtable.inline.hpp"
 
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -43,7 +43,6 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/interpreter.hpp"
@@ -53,6 +52,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/klass.inline.hpp"
@@ -75,6 +75,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "services/classLoadingService.hpp"
 #include "services/diagnosticCommand.hpp"
@@ -181,7 +182,7 @@
     return false;
   }
   return (class_loader->klass() == SystemDictionary::jdk_internal_loader_ClassLoaders_AppClassLoader_klass() ||
-       class_loader == _java_system_loader);
+         oopDesc::equals(class_loader, _java_system_loader));
 }
 
 // Returns true if the passed class loader is the platform class loader.
@@ -390,7 +391,7 @@
        ((quicksuperk = childk->super()) != NULL) &&
 
          ((quicksuperk->name() == class_name) &&
-            (quicksuperk->class_loader()  == class_loader()))) {
+            (oopDesc::equals(quicksuperk->class_loader(), class_loader())))) {
            return quicksuperk;
     } else {
       PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, child_name, loader_data);
@@ -524,7 +525,7 @@
   bool calledholdinglock
       = ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, lockObject);
   assert(calledholdinglock,"must hold lock for notify");
-  assert((!(lockObject() == _system_loader_lock_obj) && !is_parallelCapable(lockObject)), "unexpected double_lock_wait");
+  assert((!oopDesc::equals(lockObject(), _system_loader_lock_obj) && !is_parallelCapable(lockObject)), "unexpected double_lock_wait");
   ObjectSynchronizer::notifyall(lockObject, THREAD);
   intptr_t recursions =  ObjectSynchronizer::complete_exit(lockObject, THREAD);
   SystemDictionary_lock->wait();
@@ -842,7 +843,7 @@
       // If everything was OK (no exceptions, no null return value), and
       // class_loader is NOT the defining loader, do a little more bookkeeping.
       if (!HAS_PENDING_EXCEPTION && k != NULL &&
-        k->class_loader() != class_loader()) {
+        !oopDesc::equals(k->class_loader(), class_loader())) {
 
         check_constraints(d_hash, k, class_loader, false, THREAD);
 
@@ -988,7 +989,7 @@
   if (host_klass != NULL) {
     // Create a new CLD for anonymous class, that uses the same class loader
     // as the host_klass
-    guarantee(host_klass->class_loader() == class_loader(), "should be the same");
+    guarantee(oopDesc::equals(host_klass->class_loader(), class_loader()), "should be the same");
     loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader);
   } else {
     loader_data = ClassLoaderData::class_loader_data(class_loader());
@@ -1746,7 +1747,7 @@
       == ObjectSynchronizer::owner_other) {
     // contention will likely happen, so increment the corresponding
     // contention counter.
-    if (loader_lock() == _system_loader_lock_obj) {
+    if (oopDesc::equals(loader_lock(), _system_loader_lock_obj)) {
       ClassLoader::sync_systemLoaderLockContentionRate()->inc();
     } else {
       ClassLoader::sync_nonSystemLoaderLockContentionRate()->inc();
@@ -1829,7 +1830,7 @@
   BoolObjectClosure* _is_alive;
 
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     guarantee(_is_alive->do_object_b(obj), "Oop in protection domain cache table must be live");
   }
 
@@ -2228,7 +2229,7 @@
       // cleared if revocation occurs too often for this type
      // NOTE that we must only do this when the class is initially
       // defined, not each time it is referenced from a new class loader
-      if (k->class_loader() == class_loader()) {
+      if (oopDesc::equals(k->class_loader(), class_loader())) {
         k->set_prototype_header(markOopDesc::biased_locking_prototype());
       }
     }
@@ -2420,7 +2421,7 @@
                                                Handle loader1, Handle loader2,
                                                bool is_method, TRAPS)  {
   // Nothing to do if loaders are the same.
-  if (loader1() == loader2()) {
+  if (oopDesc::equals(loader1(), loader2())) {
     return NULL;
   }
 
@@ -2699,7 +2700,7 @@
       mirror = ss.as_java_mirror(class_loader, protection_domain,
                                  SignatureStream::NCDFError, CHECK_(empty));
     }
-    assert(!oopDesc::is_null(mirror), "%s", ss.as_symbol(THREAD)->as_C_string());
+    assert(mirror != NULL, "%s", ss.as_symbol(THREAD)->as_C_string());
     if (ss.at_return_type())
       rt = Handle(THREAD, mirror);
     else
@@ -2793,7 +2794,7 @@
     // which MemberName resolution doesn't handle. There's special logic on JDK side to handle them
     // (see MethodHandles.linkMethodHandleConstant() and MethodHandles.findVirtualForMH()).
   } else {
-    MethodHandles::resolve_MemberName(mname, caller, CHECK_(empty));
+    MethodHandles::resolve_MemberName(mname, caller, /*speculative_resolve*/false, CHECK_(empty));
   }
 
   // After method/field resolution succeeded, it's safe to resolve MH signature as well.
--- a/src/hotspot/share/classfile/verificationType.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/verificationType.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,6 +27,7 @@
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/verificationType.hpp"
 #include "classfile/verifier.hpp"
+#include "logging/log.hpp"
 #include "runtime/handles.inline.hpp"
 
 VerificationType VerificationType::from_tag(u1 tag) {
--- a/src/hotspot/share/classfile/verificationType.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/verificationType.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,7 +26,6 @@
 #define SHARE_VM_CLASSFILE_VERIFICATIONTYPE_HPP
 
 #include "classfile/systemDictionary.hpp"
-#include "memory/allocation.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/oop.hpp"
 #include "oops/symbol.hpp"
--- a/src/hotspot/share/classfile/verifier.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/verifier.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -49,6 +49,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/thread.hpp"
 #include "services/threadService.hpp"
 #include "utilities/align.hpp"
--- a/src/hotspot/share/classfile/verifier.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/classfile/verifier.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,7 +26,6 @@
 #define SHARE_VM_CLASSFILE_VERIFIER_HPP
 
 #include "classfile/verificationType.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "oops/klass.hpp"
 #include "oops/method.hpp"
 #include "runtime/handles.hpp"
--- a/src/hotspot/share/code/codeBlob.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/codeBlob.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -294,6 +294,28 @@
   return blob;
 }
 
+VtableBlob::VtableBlob(const char* name, int size) :
+  BufferBlob(name, size) {
+}
+
+VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+
+  VtableBlob* blob = NULL;
+  unsigned int size = sizeof(VtableBlob);
+  // align the size to CodeEntryAlignment
+  size = align_code_offset(size);
+  size += align_up(buffer_size, oopSize);
+  assert(name != NULL, "must provide a name");
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = new (size) VtableBlob(name, size);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+
+  return blob;
+}
 
 //----------------------------------------------------------------------------------------------------
 // Implementation of MethodHandlesAdapterBlob
--- a/src/hotspot/share/code/codeBlob.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/codeBlob.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -58,6 +58,7 @@
 //  RuntimeBlob          : Non-compiled method code; generated glue code
 //   BufferBlob          : Used for non-relocatable code such as interpreter, stubroutines, etc.
 //    AdapterBlob        : Used to hold C2I/I2C adapters
+//    VtableBlob         : Used for holding vtable chunks
 //    MethodHandlesAdapterBlob : Used to hold MethodHandles adapters
 //   RuntimeStub         : Call to VM runtime methods
 //   SingletonBlob       : Super-class for all blobs that exist in only one instance
@@ -132,6 +133,7 @@
   virtual bool is_exception_stub() const              { return false; }
   virtual bool is_safepoint_stub() const              { return false; }
   virtual bool is_adapter_blob() const                { return false; }
+  virtual bool is_vtable_blob() const                 { return false; }
   virtual bool is_method_handles_adapter_blob() const { return false; }
   virtual bool is_aot() const                         { return false; }
   virtual bool is_compiled() const                    { return false; }
@@ -380,6 +382,7 @@
 class BufferBlob: public RuntimeBlob {
   friend class VMStructs;
   friend class AdapterBlob;
+  friend class VtableBlob;
   friend class MethodHandlesAdapterBlob;
   friend class WhiteBox;
 
@@ -425,6 +428,18 @@
   virtual bool is_adapter_blob() const { return true; }
 };
 
+//---------------------------------------------------------------------------------------------------
+class VtableBlob: public BufferBlob {
+private:
+  VtableBlob(const char*, int);
+
+public:
+  // Creation
+  static VtableBlob* create(const char* name, int buffer_size);
+
+  // Typing
+  virtual bool is_vtable_blob() const { return true; }
+};
 
 //----------------------------------------------------------------------------------------------------
 // MethodHandlesAdapterBlob: used to hold MethodHandles adapters
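VtableBlob gives vtable chunks their own blob type instead of folding them into generic BufferBlobs, so code-cache tooling can recognize them via is_vtable_blob(). A hedged sketch of a call site (the function below is hypothetical; the actual caller lives in the vtable stub machinery):

  // Allocate a chunk for vtable stubs; create() may return NULL when the
  // CodeCache is exhausted, so the caller must cope with failure.
  static VtableBlob* allocate_vtable_chunk(int bytes) {
    VtableBlob* blob = VtableBlob::create("vtable chunks", bytes);
    if (blob == NULL) {
      return NULL;                     // out of CodeCache space
    }
    assert(blob->is_vtable_blob(), "must be typed as a vtable blob");
    return blob;
  }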
--- a/src/hotspot/share/code/codeCache.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/codeCache.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,13 +26,15 @@
 #include "aot/aotLoader.hpp"
 #include "code/codeBlob.hpp"
 #include "code/codeCache.hpp"
+#include "code/codeHeapState.hpp"
 #include "code/compiledIC.hpp"
 #include "code/dependencies.hpp"
 #include "code/icBuffer.hpp"
 #include "code/nmethod.hpp"
 #include "code/pcDesc.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/gcLocker.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
@@ -47,6 +49,7 @@
 #include "runtime/icache.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sweeper.hpp"
 #include "services/memoryService.hpp"
 #include "trace/tracing.hpp"
@@ -1363,8 +1366,17 @@
       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       print_summary(&s);
     }
-    ttyLocker ttyl;
-    tty->print("%s", s.as_string());
+    {
+      ttyLocker ttyl;
+      tty->print("%s", s.as_string());
+    }
+
+    if (heap->full_count() == 0) {
+      LogTarget(Debug, codecache) lt;
+      if (lt.is_enabled()) {
+        CompileBroker::print_heapinfo(tty, "all", "4096"); // details, may be a lot!
+      }
+    }
   }
 
   heap->report_full();
@@ -1639,3 +1651,54 @@
             blob_count(), nmethod_count(), adapter_count(),
             unallocated_capacity());
 }
+
+//---<  BEGIN  >--- CodeHeap State Analytics.
+
+void CodeCache::aggregate(outputStream *out, const char* granularity) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::aggregate(out, (*heap), granularity);
+  }
+}
+
+void CodeCache::discard(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::discard(out, (*heap));
+  }
+}
+
+void CodeCache::print_usedSpace(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_usedSpace(out, (*heap));
+  }
+}
+
+void CodeCache::print_freeSpace(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_freeSpace(out, (*heap));
+  }
+}
+
+void CodeCache::print_count(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_count(out, (*heap));
+  }
+}
+
+void CodeCache::print_space(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_space(out, (*heap));
+  }
+}
+
+void CodeCache::print_age(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_age(out, (*heap));
+  }
+}
+
+void CodeCache::print_names(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_names(out, (*heap));
+  }
+}
+//---<  END  >--- CodeHeap State Analytics.
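Each wrapper above fans one analytics request out over every allocable code heap. For orientation, the iteration macro has roughly this shape; the exact definition lives in codeCache.hpp, so treat this expansion as an assumption:

  #define FOR_ALL_ALLOCABLE_HEAPS(heap)                                      \
    for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin();  \
         heap != _allocable_heaps->end(); ++heap)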
--- a/src/hotspot/share/code/codeCache.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/codeCache.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -296,6 +296,17 @@
     CodeHeap* heap = get_code_heap(code_blob_type);
     return (heap != NULL) ? heap->full_count() : 0;
   }
+
+  // CodeHeap State Analytics.
+  // interface methods for CodeHeap printing, called by CompileBroker
+  static void aggregate(outputStream *out, const char* granularity);
+  static void discard(outputStream *out);
+  static void print_usedSpace(outputStream *out);
+  static void print_freeSpace(outputStream *out);
+  static void print_count(outputStream *out);
+  static void print_space(outputStream *out);
+  static void print_age(outputStream *out);
+  static void print_names(outputStream *out);
 };
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/code/codeHeapState.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,2338 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/codeHeapState.hpp"
+#include "compiler/compileBroker.hpp"
+#include "runtime/sweeper.hpp"
+
+// -------------------------
+// |  General Description  |
+// -------------------------
+// The CodeHeap state analytics are divided in two parts.
+// The first part examines the entire CodeHeap and aggregates all
+// information that is believed useful/important.
+//
+// Aggregation condenses the information of a piece of the CodeHeap
+// (4096 bytes by default) into an analysis granule. These granules
+// contain enough detail to gain initial insight while keeping the
+// internal structure sizes in check.
+//
+// The CodeHeap is a living thing. Therefore, the aggregate is collected
+// under the CodeCache_lock. The subsequent print steps are only locked
+// against concurrent aggregations. That keeps the impact on
+// "normal operation" (JIT compiler and sweeper activity) to a minimum.
+//
+// The second part, which consists of several, independent steps,
+// prints the previously collected information with emphasis on
+// various aspects.
+//
+// Data collection and printing are done on an "on request" basis.
+// While no request is being processed, there is no impact on performance.
+// The CodeHeap state analytics do have some memory footprint.
+// The "aggregate" step allocates some data structures to hold the aggregated
+// information for later output. These data structures live until they are
+// explicitly discarded (function "discard") or until the VM terminates.
+// There is one exception: the function "all" does not leave any data
+// structures allocated.
+//
+// Requests for real-time, on-the-fly analysis can be issued via
+//   jcmd <pid> Compiler.CodeHeap_Analytics [<function>] [<granularity>]
+//
+// If you are (only) interested in what the CodeHeap looks like after running
+// a sample workload, you can use the command line option
+//   -Xlog:codecache=Trace
+//
+// To see the CodeHeap state in case of a "CodeCache full" condition, start the
+// VM with the
+//   -Xlog:codecache=Debug
+// command line option. It will produce output only for the first time the
+// condition is recognized.
+//
+// Both command line option variants produce output identical to the jcmd function
+//   jcmd <pid> Compiler.CodeHeap_Analytics all 4096
+// ---------------------------------------------------------------------------------
+
+// With this declaration macro, it is possible to switch between
+//  - direct output into an argument-passed outputStream and
+//  - buffered output into a bufferedStream with subsequent flush
+//    of the filled buffer to the outputStream.
+#define USE_STRINGSTREAM
+#define HEX32_FORMAT  "0x%x"  // just a helper format string used below multiple times
+//
+// Writing to a bufferedStream buffer first has a significant advantage:
+// It uses noticeably fewer CPU cycles and reduces (when writing to a
+// network file) the required bandwidth by at least a factor of ten.
+// That clearly makes up for the increased code complexity.
+#if defined(USE_STRINGSTREAM)
+#define STRINGSTREAM_DECL(_anyst, _outst)                 \
+    /* _anyst  name of the stream as used in the code */  \
+    /* _outst  stream where final output will go to   */  \
+    ResourceMark rm;                                      \
+    bufferedStream   _sstobj = bufferedStream(4*K);       \
+    bufferedStream*  _sstbuf = &_sstobj;                  \
+    outputStream*    _outbuf = _outst;                    \
+    bufferedStream*  _anyst  = &_sstobj; /* any stream. Use this to just print - no buffer flush.  */
+
+#define STRINGSTREAM_FLUSH(termString)                    \
+    _sstbuf->print("%s", termString);                     \
+    _outbuf->print("%s", _sstbuf->as_string());           \
+    _sstbuf->reset();
+
+#define STRINGSTREAM_FLUSH_LOCKED(termString)             \
+    { ttyLocker ttyl;/* keep this output block together */\
+      STRINGSTREAM_FLUSH(termString)                      \
+    }
+#else
+#define STRINGSTREAM_DECL(_anyst, _outst)                 \
+    outputStream*  _outbuf = _outst;                      \
+    outputStream*  _anyst  = _outst;   /* any stream. Use this to just print - no buffer flush.  */
+
+#define STRINGSTREAM_FLUSH(termString)                    \
+    _outbuf->print("%s", termString);
+
+#define STRINGSTREAM_FLUSH_LOCKED(termString)             \
+    _outbuf->print("%s", termString);
+#endif
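// For orientation, the three macros above are meant to compose like this
// (hypothetical function, shown only as a commented-out usage sketch):
//
//   void CodeHeapState::print_example(outputStream* out) {
//     STRINGSTREAM_DECL(ast, out)       // declare and buffer locally
//     ast->print_cr("header line");     // cheap prints go to the buffer
//     STRINGSTREAM_FLUSH_LOCKED("\n")   // one locked flush to the real stream
//   }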
+
+const char  blobTypeChar[] = {' ', 'N', 'I', 'X', 'Z', 'U', 'R', '?', 'D', 'T', 'E', 'S', 'A', 'M', 'B', 'L' };
+const char* blobTypeName[] = {"noType"
+                             ,     "nMethod (active)"
+                             ,          "nMethod (inactive)"
+                             ,               "nMethod (deopt)"
+                             ,                    "nMethod (zombie)"
+                             ,                         "nMethod (unloaded)"
+                             ,                              "runtime stub"
+                             ,                                   "ricochet stub"
+                             ,                                        "deopt stub"
+                             ,                                             "uncommon trap stub"
+                             ,                                                  "exception stub"
+                             ,                                                       "safepoint stub"
+                             ,                                                            "adapter blob"
+                             ,                                                                 "MH adapter blob"
+                             ,                                                                      "buffer blob"
+                             ,                                                                           "lastType"
+                             };
+const char* compTypeName[] = { "none", "c1", "c2", "jvmci" };
+
+// Be prepared for ten different CodeHeap segments. Should be enough for a few years.
+const  unsigned int        nSizeDistElements = 31;  // logarithmic range growth, max size: 2**32
+const  unsigned int        maxTopSizeBlocks  = 50;
+const  unsigned int        tsbStopper        = 2 * maxTopSizeBlocks;
+const  unsigned int        maxHeaps          = 10;
+static unsigned int        nHeaps            = 0;
+static struct CodeHeapStat CodeHeapStatArray[maxHeaps];
+
+static StatElement* StatArray             = NULL;
+static int          log2_seg_size         = 0;
+static size_t       seg_size              = 0;
+static size_t       alloc_granules        = 0;
+static size_t       granule_size          = 0;
+static bool         segment_granules      = false;
+static unsigned int nBlocks_t1            = 0;  // counting "in_use" nmethods only.
+static unsigned int nBlocks_t2            = 0;  // counting "in_use" nmethods only.
+static unsigned int nBlocks_alive         = 0;  // counting "not_used" and "not_entrant" nmethods only.
+static unsigned int nBlocks_dead          = 0;  // counting "zombie" and "unloaded" methods only.
+static unsigned int nBlocks_unloaded      = 0;  // counting "unloaded" nmethods only. This is a transient state.
+static unsigned int nBlocks_stub          = 0;
+
+static struct FreeBlk*          FreeArray = NULL;
+static unsigned int      alloc_freeBlocks = 0;
+
+static struct TopSizeBlk*    TopSizeArray = NULL;
+static unsigned int   alloc_topSizeBlocks = 0;
+static unsigned int    used_topSizeBlocks = 0;
+
+static struct SizeDistributionElement*  SizeDistributionArray = NULL;
+
+// nMethod temperature (hotness) indicators.
+static int                     avgTemp    = 0;
+static int                     maxTemp    = 0;
+static int                     minTemp    = 0;
+
+static unsigned int  latest_compilation_id   = 0;
+static volatile bool initialization_complete = false;
+
+const char* CodeHeapState::get_heapName(CodeHeap* heap) {
+  if (SegmentedCodeCache) {
+    return heap->name();
+  } else {
+    return "CodeHeap";
+  }
+}
+
+// returns the index for the heap being processed.
+unsigned int CodeHeapState::findHeapIndex(outputStream* out, const char* heapName) {
+  if (heapName == NULL) {
+    return maxHeaps;
+  }
+  if (SegmentedCodeCache) {
+    // Search for a pre-existing entry. If found, return that index.
+    for (unsigned int i = 0; i < nHeaps; i++) {
+      if (CodeHeapStatArray[i].heapName != NULL && strcmp(heapName, CodeHeapStatArray[i].heapName) == 0) {
+        return i;
+      }
+    }
+
+    // check if there are more code heap segments than we can handle.
+    if (nHeaps == maxHeaps) {
+      out->print_cr("Too many heap segments for current limit(%d).", maxHeaps);
+      return maxHeaps;
+    }
+
+    // allocate a new slot in CodeHeapStatArray.
+    CodeHeapStatArray[nHeaps].heapName = heapName;
+    return nHeaps++;
+  } else {
+    nHeaps = 1;
+    CodeHeapStatArray[0].heapName = heapName;
+    return 0; // This is the default index if CodeCache is not segmented.
+  }
+}
+
+void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName) {
+  unsigned int ix = findHeapIndex(out, heapName);
+  if (ix < maxHeaps) {
+    StatArray             = CodeHeapStatArray[ix].StatArray;
+    seg_size              = CodeHeapStatArray[ix].segment_size;
+    log2_seg_size         = seg_size == 0 ? 0 : exact_log2(seg_size);
+    alloc_granules        = CodeHeapStatArray[ix].alloc_granules;
+    granule_size          = CodeHeapStatArray[ix].granule_size;
+    segment_granules      = CodeHeapStatArray[ix].segment_granules;
+    nBlocks_t1            = CodeHeapStatArray[ix].nBlocks_t1;
+    nBlocks_t2            = CodeHeapStatArray[ix].nBlocks_t2;
+    nBlocks_alive         = CodeHeapStatArray[ix].nBlocks_alive;
+    nBlocks_dead          = CodeHeapStatArray[ix].nBlocks_dead;
+    nBlocks_unloaded      = CodeHeapStatArray[ix].nBlocks_unloaded;
+    nBlocks_stub          = CodeHeapStatArray[ix].nBlocks_stub;
+    FreeArray             = CodeHeapStatArray[ix].FreeArray;
+    alloc_freeBlocks      = CodeHeapStatArray[ix].alloc_freeBlocks;
+    TopSizeArray          = CodeHeapStatArray[ix].TopSizeArray;
+    alloc_topSizeBlocks   = CodeHeapStatArray[ix].alloc_topSizeBlocks;
+    used_topSizeBlocks    = CodeHeapStatArray[ix].used_topSizeBlocks;
+    SizeDistributionArray = CodeHeapStatArray[ix].SizeDistributionArray;
+    avgTemp               = CodeHeapStatArray[ix].avgTemp;
+    maxTemp               = CodeHeapStatArray[ix].maxTemp;
+    minTemp               = CodeHeapStatArray[ix].minTemp;
+  } else {
+    StatArray             = NULL;
+    seg_size              = 0;
+    log2_seg_size         = 0;
+    alloc_granules        = 0;
+    granule_size          = 0;
+    segment_granules      = false;
+    nBlocks_t1            = 0;
+    nBlocks_t2            = 0;
+    nBlocks_alive         = 0;
+    nBlocks_dead          = 0;
+    nBlocks_unloaded      = 0;
+    nBlocks_stub          = 0;
+    FreeArray             = NULL;
+    alloc_freeBlocks      = 0;
+    TopSizeArray          = NULL;
+    alloc_topSizeBlocks   = 0;
+    used_topSizeBlocks    = 0;
+    SizeDistributionArray = NULL;
+    avgTemp               = 0;
+    maxTemp               = 0;
+    minTemp               = 0;
+  }
+}
+
+void CodeHeapState::set_HeapStatGlobals(outputStream* out, const char* heapName) {
+  unsigned int ix = findHeapIndex(out, heapName);
+  if (ix < maxHeaps) {
+    CodeHeapStatArray[ix].StatArray             = StatArray;
+    CodeHeapStatArray[ix].segment_size          = seg_size;
+    CodeHeapStatArray[ix].alloc_granules        = alloc_granules;
+    CodeHeapStatArray[ix].granule_size          = granule_size;
+    CodeHeapStatArray[ix].segment_granules      = segment_granules;
+    CodeHeapStatArray[ix].nBlocks_t1            = nBlocks_t1;
+    CodeHeapStatArray[ix].nBlocks_t2            = nBlocks_t2;
+    CodeHeapStatArray[ix].nBlocks_alive         = nBlocks_alive;
+    CodeHeapStatArray[ix].nBlocks_dead          = nBlocks_dead;
+    CodeHeapStatArray[ix].nBlocks_unloaded      = nBlocks_unloaded;
+    CodeHeapStatArray[ix].nBlocks_stub          = nBlocks_stub;
+    CodeHeapStatArray[ix].FreeArray             = FreeArray;
+    CodeHeapStatArray[ix].alloc_freeBlocks      = alloc_freeBlocks;
+    CodeHeapStatArray[ix].TopSizeArray          = TopSizeArray;
+    CodeHeapStatArray[ix].alloc_topSizeBlocks   = alloc_topSizeBlocks;
+    CodeHeapStatArray[ix].used_topSizeBlocks    = used_topSizeBlocks;
+    CodeHeapStatArray[ix].SizeDistributionArray = SizeDistributionArray;
+    CodeHeapStatArray[ix].avgTemp               = avgTemp;
+    CodeHeapStatArray[ix].maxTemp               = maxTemp;
+    CodeHeapStatArray[ix].minTemp               = minTemp;
+  }
+}
+
+//---<  get a new statistics array  >---
+void CodeHeapState::prepare_StatArray(outputStream* out, size_t nElem, size_t granularity, const char* heapName) {
+  if (StatArray == NULL) {
+    StatArray      = new StatElement[nElem];
+    //---<  reset some counts  >---
+    alloc_granules = nElem;
+    granule_size   = granularity;
+  }
+
+  if (StatArray == NULL) {
+    //---<  just do nothing if allocation failed  >---
+    out->print_cr("Statistics could not be collected for %s, probably out of memory.", heapName);
+    out->print_cr("Current granularity is " SIZE_FORMAT " bytes. Try a coarser granularity.", granularity);
+    alloc_granules = 0;
+    granule_size   = 0;
+  } else {
+    //---<  initialize statistics array  >---
+    memset((void*)StatArray, 0, nElem*sizeof(StatElement));
+  }
+}
+
+//---<  get a new free block array  >---
+void CodeHeapState::prepare_FreeArray(outputStream* out, unsigned int nElem, const char* heapName) {
+  if (FreeArray == NULL) {
+    FreeArray      = new FreeBlk[nElem];
+    //---<  reset some counts  >---
+    alloc_freeBlocks = nElem;
+  }
+
+  if (FreeArray == NULL) {
+    //---<  just do nothing if allocation failed  >---
+    out->print_cr("Free space analysis cannot be done for %s, probably out of memory.", heapName);
+    alloc_freeBlocks = 0;
+  } else {
+    //---<  initialize free block array  >---
+    memset((void*)FreeArray, 0, alloc_freeBlocks*sizeof(FreeBlk));
+  }
+}
+
+//---<  get a new TopSizeArray  >---
+void CodeHeapState::prepare_TopSizeArray(outputStream* out, unsigned int nElem, const char* heapName) {
+  if (TopSizeArray == NULL) {
+    TopSizeArray   = new TopSizeBlk[nElem];
+    //---<  reset some counts  >---
+    alloc_topSizeBlocks = nElem;
+    used_topSizeBlocks  = 0;
+  }
+
+  if (TopSizeArray == NULL) {
+    //---<  just do nothing if allocation failed  >---
+    out->print_cr("Top-%d list of largest CodeHeap blocks can not be collected for %s, probably out of memory.", nElem, heapName);
+    alloc_topSizeBlocks = 0;
+  } else {
+    //---<  initialize TopSizeArray  >---
+    memset((void*)TopSizeArray, 0, nElem*sizeof(TopSizeBlk));
+    used_topSizeBlocks  = 0;
+  }
+}
+
+//---<  get a new SizeDistributionArray  >---
+void CodeHeapState::prepare_SizeDistArray(outputStream* out, unsigned int nElem, const char* heapName) {
+  if (SizeDistributionArray == NULL) {
+    SizeDistributionArray = new SizeDistributionElement[nElem];
+  }
+
+  if (SizeDistributionArray == NULL) {
+    //---<  just do nothing if allocation failed  >---
+    out->print_cr("Size distribution can not be collected for %s, probably out of memory.", heapName);
+  } else {
+    //---<  initialize SizeDistArray  >---
+    memset((void*)SizeDistributionArray, 0, nElem*sizeof(SizeDistributionElement));
+    // Logarithmic range growth. First range starts at _segment_size.
+    SizeDistributionArray[log2_seg_size-1].rangeEnd = 1U;
+    for (unsigned int i = log2_seg_size; i < nElem; i++) {
+      SizeDistributionArray[i].rangeStart = 1U << (i     - log2_seg_size);
+      SizeDistributionArray[i].rangeEnd   = 1U << ((i+1) - log2_seg_size);
+    }
+  }
+}
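+
+// Example of the resulting ranges (assuming seg_size = 64, i.e. log2_seg_size = 6):
+//   SizeDistributionArray[5] = [0, 1)  segments  (stays empty: lengths are >= 1 segment)
+//   SizeDistributionArray[6] = [1, 2)  segments  = [ 64, 128) bytes
+//   SizeDistributionArray[7] = [2, 4)  segments  = [128, 256) bytes
+//   SizeDistributionArray[8] = [4, 8)  segments  = [256, 512) bytes
+// Block lengths are recorded in segment_size units; the print step shifts by
+// log2_seg_size to convert the ranges back to bytes.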
+
+//---<  update the SizeDistributionArray  >---
+void CodeHeapState::update_SizeDistArray(outputStream* out, unsigned int len) {
+  if (SizeDistributionArray != NULL) {
+    for (unsigned int i = log2_seg_size-1; i < nSizeDistElements; i++) {
+      if ((SizeDistributionArray[i].rangeStart <= len) && (len < SizeDistributionArray[i].rangeEnd)) {
+        SizeDistributionArray[i].lenSum += len;
+        SizeDistributionArray[i].count++;
+        break;
+      }
+    }
+  }
+}
+
+void CodeHeapState::discard_StatArray(outputStream* out) {
+  if (StatArray != NULL) {
+    delete[] StatArray;
+    StatArray        = NULL;
+    alloc_granules   = 0;
+    granule_size     = 0;
+  }
+}
+
+void CodeHeapState::discard_FreeArray(outputStream* out) {
+  if (FreeArray != NULL) {
+    delete[] FreeArray;
+    FreeArray        = NULL;
+    alloc_freeBlocks = 0;
+  }
+}
+
+void CodeHeapState::discard_TopSizeArray(outputStream* out) {
+  if (TopSizeArray != NULL) {
+    delete[] TopSizeArray;
+    TopSizeArray        = NULL;
+    alloc_topSizeBlocks = 0;
+    used_topSizeBlocks  = 0;
+  }
+}
+
+void CodeHeapState::discard_SizeDistArray(outputStream* out) {
+  if (SizeDistributionArray != NULL) {
+    delete[] SizeDistributionArray;
+    SizeDistributionArray = NULL;
+  }
+}
+
+// Discard all allocated internal data structures.
+// This should be done after an analysis session is completed.
+void CodeHeapState::discard(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  if (nHeaps > 0) {
+    for (unsigned int ix = 0; ix < nHeaps; ix++) {
+      get_HeapStatGlobals(out, CodeHeapStatArray[ix].heapName);
+      discard_StatArray(out);
+      discard_FreeArray(out);
+      discard_TopSizeArray(out);
+      discard_SizeDistArray(out);
+      set_HeapStatGlobals(out, CodeHeapStatArray[ix].heapName);
+      CodeHeapStatArray[ix].heapName = NULL;
+    }
+    nHeaps = 0;
+  }
+}
+
+void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, const char* granularity_request) {
+  unsigned int nBlocks_free    = 0;
+  unsigned int nBlocks_used    = 0;
+  unsigned int nBlocks_zomb    = 0;
+  unsigned int nBlocks_disconn = 0;
+  unsigned int nBlocks_notentr = 0;
+
+  //---<  max & min of TopSizeArray  >---
+  //  It is sufficient to have these sizes as 32-bit unsigned ints.
+  //  The CodeHeap is limited in size to 4GB. Furthermore, the sizes
+  //  are stored in _segment_size units, scaling them down by a factor of 64 (at least).
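+  //  Example: even a maximal 4GB CodeHeap with (minimal) 64-byte segments spans
+  //  only 2^32 / 2^6 = 2^26 segments, so any block length expressed in
+  //  segment_size units fits a 32-bit unsigned int with room to spare.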
+  unsigned int  currMax          = 0;
+  unsigned int  currMin          = 0;
+  unsigned int  currMin_ix       = 0;
+  unsigned long total_iterations = 0;
+
+  bool  done             = false;
+  const int min_granules = 256;
+  const int max_granules = 512*K; // limits analyzable CodeHeap (with segment_granules) to 32M..128M
+                                  // results in StatArray size of 20M (= max_granules * 40 Bytes per element)
+                                  // For a 1GB CodeHeap, the granule size must be at least 2kB to not violate the max_granules limit.
+  const char* heapName   = get_heapName(heap);
+  STRINGSTREAM_DECL(ast, out)
+
+  if (!initialization_complete) {
+    memset(CodeHeapStatArray, 0, sizeof(CodeHeapStatArray));
+    initialization_complete = true;
+
+    printBox(ast, '=', "C O D E   H E A P   A N A L Y S I S   (general remarks)", NULL);
+    ast->print_cr("   The code heap analysis function provides deep insights into\n"
+                  "   the inner workings and the internal state of the Java VM's\n"
+                  "   code cache - the place where all the JVM generated machine\n"
+                  "   code is stored.\n"
+                  "   \n"
+                  "   This function is designed and provided for support engineers\n"
+                  "   to help them understand and solve issues in customer systems.\n"
+                  "   It is not intended for use and interpretation by other persons.\n"
+                  "   \n");
+    STRINGSTREAM_FLUSH("")
+  }
+  get_HeapStatGlobals(out, heapName);
+
+
+  // Since we are (and must be) analyzing the CodeHeap contents under the CodeCache_lock,
+  // all heap information is "constant" and can be safely extracted/calculated before we
+  // enter the while() loop. Actually, the loop will only be iterated once.
+  char*  low_bound     = heap->low_boundary();
+  size_t size          = heap->capacity();
+  size_t res_size      = heap->max_capacity();
+  seg_size             = heap->segment_size();
+  log2_seg_size        = seg_size == 0 ? 0 : exact_log2(seg_size);  // This is a global static value.
+
+  if (seg_size == 0) {
+    printBox(ast, '-', "Heap not fully initialized yet, segment size is zero for segment ", heapName);
+    STRINGSTREAM_FLUSH("")
+    return;
+  }
+
+  // Calculate granularity of analysis (and output).
+  //   The CodeHeap is managed (allocated) in segments (units) of CodeCacheSegmentSize.
+  //   The CodeHeap can become fairly large, in particular in productive real-life systems.
+  //
+  //   It is often neither feasible nor desirable to aggregate the data with the highest possible
+  //   level of detail, i.e. inspecting and printing each segment on its own.
+  //
+  //   The granularity parameter allows the level of detail available in the analysis to be specified.
+  //   It must be a positive multiple of the segment size and should be selected such that enough
+  //   detail is provided while, at the same time, the printed output does not explode.
+  //
+  //   By manipulating the granularity value, we enforce that at least min_granules units
+  //   of analysis are available. We also enforce an upper limit of max_granules units to
+  //   keep the amount of allocated storage in check.
+  //
+  //   Finally, we adjust the granularity such that each granule covers at most 64k-1 segments.
+  //   This is necessary to prevent an unsigned short overflow while accumulating space information.
+  //
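+  //   Worked example (illustrative values, not taken from a live system):
+  //   for a 128M CodeHeap with seg_size = 64 and a requested granularity of 1M,
+  //   the min_granules rule dominates: 128M/1M = 128 < 256, so the granularity is
+  //   lowered to 128M/256 = 512K. 512K is a multiple of seg_size and yields
+  //   256 granules of 512K/64 = 8192 segments each, well below the 64k-1
+  //   segments-per-granule limit.
+  //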
+  size_t granularity = strtol(granularity_request, NULL, 0);
+  if (granularity == 0) {
+    granularity = seg_size; // guard against unparseable input; prevents division by zero below
+  }
+  if (granularity > size) {
+    granularity = size;
+  }
+  if (size/granularity < min_granules) {
+    granularity = size/min_granules;                                   // at least min_granules granules
+  }
+  granularity = granularity & (~(seg_size - 1));                       // must be multiple of seg_size
+  if (granularity < seg_size) {
+    granularity = seg_size;                                            // must be at least seg_size
+  }
+  if (size/granularity > max_granules) {
+    granularity = size/max_granules;                                   // at most max_granules granules
+  }
+  granularity = granularity & (~(seg_size - 1));                       // must be multiple of seg_size
+  if (granularity>>log2_seg_size >= (1L<<sizeof(unsigned short)*8)) {
+    granularity = ((1L<<(sizeof(unsigned short)*8))-1)<<log2_seg_size; // Limit: (64k-1) * seg_size
+  }
+  segment_granules = granularity == seg_size;
+  size_t granules  = (size + (granularity-1))/granularity;
+
+  printBox(ast, '=', "C O D E   H E A P   A N A L Y S I S   (used blocks) for segment ", heapName);
+  ast->print_cr("   The aggregate step takes an aggregated snapshot of the CodeHeap.\n"
+                "   Subsequent print functions create their output based on this snapshot.\n"
+                "   The CodeHeap is a living thing, and every effort has been made for the\n"
+                "   collected data to be consistent. Only the method names and signatures\n"
+                "   are retrieved at print time. That may lead to rare cases where the\n"
+                "   name of a method is no longer available, e.g. because it was unloaded.\n");
+  ast->print_cr("   CodeHeap committed size " SIZE_FORMAT "K (" SIZE_FORMAT "M), reserved size " SIZE_FORMAT "K (" SIZE_FORMAT "M), %d%% occupied.",
+                size/(size_t)K, size/(size_t)M, res_size/(size_t)K, res_size/(size_t)M, (unsigned int)(100.0*size/res_size));
+  ast->print_cr("   CodeHeap allocation segment size is " SIZE_FORMAT " bytes. This is the smallest possible granularity.", seg_size);
+  ast->print_cr("   CodeHeap (committed part) is mapped to " SIZE_FORMAT " granules of size " SIZE_FORMAT " bytes.", granules, granularity);
+  ast->print_cr("   Each granule takes " SIZE_FORMAT " bytes of C heap, that is " SIZE_FORMAT "K in total for statistics data.", sizeof(StatElement), (sizeof(StatElement)*granules)/(size_t)K);
+  ast->print_cr("   The number of granules is limited to %dk, requiring a granules size of at least %d bytes for a 1GB heap.", (unsigned int)(max_granules/K), (unsigned int)(G/max_granules));
+  STRINGSTREAM_FLUSH("\n")
+
+
+  while (!done) {
+    //---<  reset counters with every aggregation  >---
+    nBlocks_t1       = 0;
+    nBlocks_t2       = 0;
+    nBlocks_alive    = 0;
+    nBlocks_dead     = 0;
+    nBlocks_unloaded = 0;
+    nBlocks_stub     = 0;
+
+    nBlocks_free     = 0;
+    nBlocks_used     = 0;
+    nBlocks_zomb     = 0;
+    nBlocks_disconn  = 0;
+    nBlocks_notentr  = 0;
+
+    //---<  discard old arrays if size does not match  >---
+    if (granules != alloc_granules) {
+      discard_StatArray(out);
+      discard_TopSizeArray(out);
+    }
+
+    //---<  allocate arrays if they don't yet exist, initialize  >---
+    prepare_StatArray(out, granules, granularity, heapName);
+    if (StatArray == NULL) {
+      set_HeapStatGlobals(out, heapName);
+      return;
+    }
+    prepare_TopSizeArray(out, maxTopSizeBlocks, heapName);
+    prepare_SizeDistArray(out, nSizeDistElements, heapName);
+
+    latest_compilation_id = CompileBroker::get_compilation_id();
+    unsigned int highest_compilation_id = 0;
+    size_t       usedSpace     = 0;
+    size_t       t1Space       = 0;
+    size_t       t2Space       = 0;
+    size_t       aliveSpace    = 0;
+    size_t       disconnSpace  = 0;
+    size_t       notentrSpace  = 0;
+    size_t       deadSpace     = 0;
+    size_t       unloadedSpace = 0;
+    size_t       stubSpace     = 0;
+    size_t       freeSpace     = 0;
+    size_t       maxFreeSize   = 0;
+    HeapBlock*   maxFreeBlock  = NULL;
+    bool         insane        = false;
+
+    int64_t hotnessAccumulator = 0;
+    unsigned int n_methods     = 0;
+    avgTemp       = 0;
+    minTemp       = (int)(res_size > M ? (res_size/M)*2 : 1);
+    maxTemp       = -minTemp;
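+    // minTemp is primed with a value that appears to mirror the sweeper's hotness
+    // reset value (NMethodSweeper::hotness_counter_reset_val() uses a comparable
+    // (size/M)*2 formula), so the first method observed pulls min/max to its
+    // actual temperature.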
+
+    for (HeapBlock *h = heap->first_block(); h != NULL && !insane; h = heap->next_block(h)) {
+      unsigned int hb_len     = (unsigned int)h->length();  // despite being size_t, length can never overflow an unsigned int.
+      size_t       hb_bytelen = ((size_t)hb_len)<<log2_seg_size;
+      unsigned int ix_beg     = (unsigned int)(((char*)h-low_bound)/granule_size);
+      unsigned int ix_end     = (unsigned int)(((char*)h-low_bound+(hb_bytelen-1))/granule_size);
+      unsigned int compile_id = 0;
+      CompLevel    comp_lvl   = CompLevel_none;
+      compType     cType      = noComp;
+      blobType     cbType     = noType;
+
+      //---<  some sanity checks  >---
+      // Do not assert here, just check, print error message and return.
+      // This is a diagnostic function. It is not supposed to tear down the VM.
+      if ((char*)h <  low_bound ) {
+        insane = true; ast->print_cr("Sanity check: HeapBlock @%p below low bound (%p)", (char*)h, low_bound);
+      }
+      if (ix_end   >= granules  ) {
+        insane = true; ast->print_cr("Sanity check: end index (%d) out of bounds (" SIZE_FORMAT ")", ix_end, granules);
+      }
+      if (size     != heap->capacity()) {
+        insane = true; ast->print_cr("Sanity check: code heap capacity has changed (" SIZE_FORMAT "K to " SIZE_FORMAT "K)", size/(size_t)K, heap->capacity()/(size_t)K);
+      }
+      if (ix_beg   >  ix_end    ) {
+        insane = true; ast->print_cr("Sanity check: end index (%d) lower than begin index (%d)", ix_end, ix_beg);
+      }
+      if (insane) {
+        STRINGSTREAM_FLUSH("")
+        continue;
+      }
+
+      if (h->free()) {
+        nBlocks_free++;
+        freeSpace    += hb_bytelen;
+        if (hb_bytelen > maxFreeSize) {
+          maxFreeSize   = hb_bytelen;
+          maxFreeBlock  = h;
+        }
+      } else {
+        update_SizeDistArray(out, hb_len);
+        nBlocks_used++;
+        usedSpace    += hb_bytelen;
+        CodeBlob* cb  = (CodeBlob*)heap->find_start(h);
+        if (cb != NULL) {
+          cbType = get_cbType(cb);
+          if (cb->is_nmethod()) {
+            compile_id = ((nmethod*)cb)->compile_id();
+            comp_lvl   = (CompLevel)((nmethod*)cb)->comp_level();
+            if (((nmethod*)cb)->is_compiled_by_c1()) {
+              cType = c1;
+            }
+            if (((nmethod*)cb)->is_compiled_by_c2()) {
+              cType = c2;
+            }
+            if (((nmethod*)cb)->is_compiled_by_jvmci()) {
+              cType = jvmci;
+            }
+            switch (cbType) {
+              case nMethod_inuse: { // only for executable methods!!!
+                // space for these cbs is accounted for later.
+                int temperature = ((nmethod*)cb)->hotness_counter();
+                hotnessAccumulator += temperature;
+                n_methods++;
+                maxTemp = (temperature > maxTemp) ? temperature : maxTemp;
+                minTemp = (temperature < minTemp) ? temperature : minTemp;
+                break;
+              }
+              case nMethod_notused:
+                nBlocks_alive++;
+                nBlocks_disconn++;
+                aliveSpace     += hb_bytelen;
+                disconnSpace   += hb_bytelen;
+                break;
+              case nMethod_notentrant:  // equivalent to nMethod_alive
+                nBlocks_alive++;
+                nBlocks_notentr++;
+                aliveSpace     += hb_bytelen;
+                notentrSpace   += hb_bytelen;
+                break;
+              case nMethod_unloaded:
+                nBlocks_unloaded++;
+                unloadedSpace  += hb_bytelen;
+                break;
+              case nMethod_dead:
+                nBlocks_dead++;
+                deadSpace      += hb_bytelen;
+                break;
+              default:
+                break;
+            }
+          }
+
+          //------------------------------------------
+          //---<  register block in TopSizeArray  >---
+          //------------------------------------------
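+          // Layout sketch (illustrative values): entries form a size-descending
+          // list threaded through the 'index' fields, e.g. with three entries
+          //   [0].len=900,.index=2   [1].len=150,.index=tsbStopper   [2].len=400,.index=1
+          // the traversal order is 0 -> 2 -> 1. Slot 0 always holds the largest
+          // block; tsbStopper terminates the chain at the smallest remembered block.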
+          if (alloc_topSizeBlocks > 0) {
+            if (used_topSizeBlocks == 0) {
+              TopSizeArray[0].start    = h;
+              TopSizeArray[0].len      = hb_len;
+              TopSizeArray[0].index    = tsbStopper;
+              TopSizeArray[0].compiler = cType;
+              TopSizeArray[0].level    = comp_lvl;
+              TopSizeArray[0].type     = cbType;
+              currMax    = hb_len;
+              currMin    = hb_len;
+              currMin_ix = 0;
+              used_topSizeBlocks++;
+            // This check roughly cuts 5000 iterations (JVM98, mixed, dbg, termination stats):
+            } else if ((used_topSizeBlocks < alloc_topSizeBlocks) && (hb_len < currMin)) {
+              //---<  all blocks in list are larger, but there is room left in array  >---
+              TopSizeArray[currMin_ix].index = used_topSizeBlocks;
+              TopSizeArray[used_topSizeBlocks].start    = h;
+              TopSizeArray[used_topSizeBlocks].len      = hb_len;
+              TopSizeArray[used_topSizeBlocks].index    = tsbStopper;
+              TopSizeArray[used_topSizeBlocks].compiler = cType;
+              TopSizeArray[used_topSizeBlocks].level    = comp_lvl;
+              TopSizeArray[used_topSizeBlocks].type     = cbType;
+              currMin    = hb_len;
+              currMin_ix = used_topSizeBlocks;
+              used_topSizeBlocks++;
+            } else {
+              // This check cuts total_iterations by a factor of 6 (JVM98, mixed, dbg, termination stats):
+              //   We don't need to search the list if we know beforehand that the current block size is
+              //   smaller than the currently recorded minimum and there is no free entry left in the list.
+              if (!((used_topSizeBlocks == alloc_topSizeBlocks) && (hb_len <= currMin))) {
+                if (currMax < hb_len) {
+                  currMax = hb_len;
+                }
+                unsigned int i;
+                unsigned int limit_i =  0;
+                for (i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
+                  if (limit_i++ >= alloc_topSizeBlocks) {
+                    insane = true; break; // emergency exit
+                  }
+                  if (i >= used_topSizeBlocks)  {
+                    insane = true; break; // emergency exit
+                  }
+                  total_iterations++;
+                  if (TopSizeArray[i].len < hb_len) {
+                    //---<  We want to insert here, element <i> is smaller than the current one  >---
+                    if (used_topSizeBlocks < alloc_topSizeBlocks) { // still room for a new entry to insert
+                      // old entry gets moved to the next free element of the array.
+                      // That's necessary to keep the entry for the largest block at index 0.
+                      // This move might cause the current minimum to be moved to another place
+                      if (i == currMin_ix) {
+                        assert(TopSizeArray[i].len == currMin, "sort error");
+                        currMin_ix = used_topSizeBlocks;
+                      }
+                      memcpy((void*)&TopSizeArray[used_topSizeBlocks], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
+                      TopSizeArray[i].start    = h;
+                      TopSizeArray[i].len      = hb_len;
+                      TopSizeArray[i].index    = used_topSizeBlocks;
+                      TopSizeArray[i].compiler = cType;
+                      TopSizeArray[i].level    = comp_lvl;
+                      TopSizeArray[i].type     = cbType;
+                      used_topSizeBlocks++;
+                    } else { // no room for new entries, current block replaces entry for smallest block
+                      //---<  Find last entry (entry for smallest remembered block)  >---
+                      unsigned int      j  = i;
+                      unsigned int prev_j  = tsbStopper;
+                      unsigned int limit_j = 0;
+                      while (TopSizeArray[j].index != tsbStopper) {
+                        if (limit_j++ >= alloc_topSizeBlocks) {
+                          insane = true; break; // emergency exit
+                        }
+                        if (j >= used_topSizeBlocks)  {
+                          insane = true; break; // emergency exit
+                        }
+                        total_iterations++;
+                        prev_j = j;
+                        j      = TopSizeArray[j].index;
+                      }
+                      if (!insane) {
+                        if (prev_j == tsbStopper) {
+                          //---<  Above while loop did not iterate, we already are the min entry  >---
+                          //---<  We have to just replace the smallest entry                      >---
+                          currMin    = hb_len;
+                          currMin_ix = j;
+                          TopSizeArray[j].start    = h;
+                          TopSizeArray[j].len      = hb_len;
+                          TopSizeArray[j].index    = tsbStopper; // already set!!
+                          TopSizeArray[j].compiler = cType;
+                          TopSizeArray[j].level    = comp_lvl;
+                          TopSizeArray[j].type     = cbType;
+                        } else {
+                          //---<  second-smallest entry is now smallest  >---
+                          TopSizeArray[prev_j].index = tsbStopper;
+                          currMin    = TopSizeArray[prev_j].len;
+                          currMin_ix = prev_j;
+                          //---<  smallest entry gets overwritten  >---
+                          memcpy((void*)&TopSizeArray[j], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
+                          TopSizeArray[i].start    = h;
+                          TopSizeArray[i].len      = hb_len;
+                          TopSizeArray[i].index    = j;
+                          TopSizeArray[i].compiler = cType;
+                          TopSizeArray[i].level    = comp_lvl;
+                          TopSizeArray[i].type     = cbType;
+                        }
+                      } // insane
+                    }
+                    break;
+                  }
+                }
+                if (insane) {
+                  // Note: regular analysis could probably continue by resetting "insane" flag.
+                  out->print_cr("Possible loop in TopSizeBlocks list detected. Analysis aborted.");
+                  discard_TopSizeArray(out);
+                }
+              }
+            }
+          }
+          //----------------------------------------------
+          //---<  END register block in TopSizeArray  >---
+          //----------------------------------------------
+        } else {
+          nBlocks_zomb++;
+        }
+
+        if (ix_beg == ix_end) {
+          StatArray[ix_beg].type = cbType;
+          switch (cbType) {
+            case nMethod_inuse:
+              highest_compilation_id = (highest_compilation_id >= compile_id) ? highest_compilation_id : compile_id;
+              if (comp_lvl < CompLevel_full_optimization) {
+                nBlocks_t1++;
+                t1Space   += hb_bytelen;
+                StatArray[ix_beg].t1_count++;
+                StatArray[ix_beg].t1_space += (unsigned short)hb_len;
+                StatArray[ix_beg].t1_age    = StatArray[ix_beg].t1_age < compile_id ? compile_id : StatArray[ix_beg].t1_age;
+              } else {
+                nBlocks_t2++;
+                t2Space   += hb_bytelen;
+                StatArray[ix_beg].t2_count++;
+                StatArray[ix_beg].t2_space += (unsigned short)hb_len;
+                StatArray[ix_beg].t2_age    = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
+              }
+              StatArray[ix_beg].level     = comp_lvl;
+              StatArray[ix_beg].compiler  = cType;
+              break;
+            case nMethod_alive:
+              StatArray[ix_beg].tx_count++;
+              StatArray[ix_beg].tx_space += (unsigned short)hb_len;
+              StatArray[ix_beg].tx_age    = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
+              StatArray[ix_beg].level     = comp_lvl;
+              StatArray[ix_beg].compiler  = cType;
+              break;
+            case nMethod_dead:
+            case nMethod_unloaded:
+              StatArray[ix_beg].dead_count++;
+              StatArray[ix_beg].dead_space += (unsigned short)hb_len;
+              break;
+            default:
+              // must be a stub if it's not a dead or alive nMethod
+              nBlocks_stub++;
+              stubSpace   += hb_bytelen;
+              StatArray[ix_beg].stub_count++;
+              StatArray[ix_beg].stub_space += (unsigned short)hb_len;
+              break;
+          }
+        } else {
+          unsigned int beg_space = (unsigned int)(granule_size - ((char*)h - low_bound - ix_beg*granule_size));
+          unsigned int end_space = (unsigned int)(hb_bytelen - beg_space - (ix_end-ix_beg-1)*granule_size);
+          beg_space = beg_space>>log2_seg_size;  // store in units of _segment_size
+          end_space = end_space>>log2_seg_size;  // store in units of _segment_size
+          StatArray[ix_beg].type = cbType;
+          StatArray[ix_end].type = cbType;
+          switch (cbType) {
+            case nMethod_inuse:
+              highest_compilation_id = (highest_compilation_id >= compile_id) ? highest_compilation_id : compile_id;
+              if (comp_lvl < CompLevel_full_optimization) {
+                nBlocks_t1++;
+                t1Space   += hb_bytelen;
+                StatArray[ix_beg].t1_count++;
+                StatArray[ix_beg].t1_space += (unsigned short)beg_space;
+                StatArray[ix_beg].t1_age    = StatArray[ix_beg].t1_age < compile_id ? compile_id : StatArray[ix_beg].t1_age;
+
+                StatArray[ix_end].t1_count++;
+                StatArray[ix_end].t1_space += (unsigned short)end_space;
+                StatArray[ix_end].t1_age    = StatArray[ix_end].t1_age < compile_id ? compile_id : StatArray[ix_end].t1_age;
+              } else {
+                nBlocks_t2++;
+                t2Space   += hb_bytelen;
+                StatArray[ix_beg].t2_count++;
+                StatArray[ix_beg].t2_space += (unsigned short)beg_space;
+                StatArray[ix_beg].t2_age    = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
+
+                StatArray[ix_end].t2_count++;
+                StatArray[ix_end].t2_space += (unsigned short)end_space;
+                StatArray[ix_end].t2_age    = StatArray[ix_end].t2_age < compile_id ? compile_id : StatArray[ix_end].t2_age;
+              }
+              StatArray[ix_beg].level     = comp_lvl;
+              StatArray[ix_beg].compiler  = cType;
+              StatArray[ix_end].level     = comp_lvl;
+              StatArray[ix_end].compiler  = cType;
+              break;
+            case nMethod_alive:
+              StatArray[ix_beg].tx_count++;
+              StatArray[ix_beg].tx_space += (unsigned short)beg_space;
+              StatArray[ix_beg].tx_age    = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
+
+              StatArray[ix_end].tx_count++;
+              StatArray[ix_end].tx_space += (unsigned short)end_space;
+              StatArray[ix_end].tx_age    = StatArray[ix_end].tx_age < compile_id ? compile_id : StatArray[ix_end].tx_age;
+
+              StatArray[ix_beg].level     = comp_lvl;
+              StatArray[ix_beg].compiler  = cType;
+              StatArray[ix_end].level     = comp_lvl;
+              StatArray[ix_end].compiler  = cType;
+              break;
+            case nMethod_dead:
+            case nMethod_unloaded:
+              StatArray[ix_beg].dead_count++;
+              StatArray[ix_beg].dead_space += (unsigned short)beg_space;
+              StatArray[ix_end].dead_count++;
+              StatArray[ix_end].dead_space += (unsigned short)end_space;
+              break;
+            default:
+              // must be a stub if it's not a dead or alive nMethod
+              nBlocks_stub++;
+              stubSpace   += hb_bytelen;
+              StatArray[ix_beg].stub_count++;
+              StatArray[ix_beg].stub_space += (unsigned short)beg_space;
+              StatArray[ix_end].stub_count++;
+              StatArray[ix_end].stub_space += (unsigned short)end_space;
+              break;
+          }
+          for (unsigned int ix = ix_beg+1; ix < ix_end; ix++) {
+            StatArray[ix].type = cbType;
+            switch (cbType) {
+              case nMethod_inuse:
+                if (comp_lvl < CompLevel_full_optimization) {
+                  StatArray[ix].t1_count++;
+                  StatArray[ix].t1_space += (unsigned short)(granule_size>>log2_seg_size);
+                  StatArray[ix].t1_age    = StatArray[ix].t1_age < compile_id ? compile_id : StatArray[ix].t1_age;
+                } else {
+                  StatArray[ix].t2_count++;
+                  StatArray[ix].t2_space += (unsigned short)(granule_size>>log2_seg_size);
+                  StatArray[ix].t2_age    = StatArray[ix].t2_age < compile_id ? compile_id : StatArray[ix].t2_age;
+                }
+                StatArray[ix].level     = comp_lvl;
+                StatArray[ix].compiler  = cType;
+                break;
+              case nMethod_alive:
+                StatArray[ix].tx_count++;
+                StatArray[ix].tx_space += (unsigned short)(granule_size>>log2_seg_size);
+                StatArray[ix].tx_age    = StatArray[ix].tx_age < compile_id ? compile_id : StatArray[ix].tx_age;
+                StatArray[ix].level     = comp_lvl;
+                StatArray[ix].compiler  = cType;
+                break;
+              case nMethod_dead:
+              case nMethod_unloaded:
+                StatArray[ix].dead_count++;
+                StatArray[ix].dead_space += (unsigned short)(granule_size>>log2_seg_size);
+                break;
+              default:
+                // must be a stub if it's not a dead or alive nMethod
+                StatArray[ix].stub_count++;
+                StatArray[ix].stub_space += (unsigned short)(granule_size>>log2_seg_size);
+                break;
+            }
+          }
+        }
+      }
+    }
+    if (n_methods > 0) {
+      avgTemp = hotnessAccumulator/n_methods;
+    } else {
+      avgTemp = 0;
+    }
+    done = true;
+
+    if (!insane) {
+      // There is a risk that this block (because it contains many print statements)
+      // gets interspersed with print data from other threads. We take this risk intentionally.
+      // Getting stalled waiting for tty_lock while holding the CodeCache_lock is not desirable.
+      printBox(ast, '-', "Global CodeHeap statistics for segment ", heapName);
+      ast->print_cr("freeSpace        = " SIZE_FORMAT_W(8) "k, nBlocks_free     = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", freeSpace/(size_t)K,     nBlocks_free,     (100.0*freeSpace)/size,     (100.0*freeSpace)/res_size);
+      ast->print_cr("usedSpace        = " SIZE_FORMAT_W(8) "k, nBlocks_used     = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", usedSpace/(size_t)K,     nBlocks_used,     (100.0*usedSpace)/size,     (100.0*usedSpace)/res_size);
+      ast->print_cr("  Tier1 Space    = " SIZE_FORMAT_W(8) "k, nBlocks_t1       = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t1Space/(size_t)K,       nBlocks_t1,       (100.0*t1Space)/size,       (100.0*t1Space)/res_size);
+      ast->print_cr("  Tier2 Space    = " SIZE_FORMAT_W(8) "k, nBlocks_t2       = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t2Space/(size_t)K,       nBlocks_t2,       (100.0*t2Space)/size,       (100.0*t2Space)/res_size);
+      ast->print_cr("  Alive Space    = " SIZE_FORMAT_W(8) "k, nBlocks_alive    = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", aliveSpace/(size_t)K,    nBlocks_alive,    (100.0*aliveSpace)/size,    (100.0*aliveSpace)/res_size);
+      ast->print_cr("    disconnected = " SIZE_FORMAT_W(8) "k, nBlocks_disconn  = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", disconnSpace/(size_t)K,  nBlocks_disconn,  (100.0*disconnSpace)/size,  (100.0*disconnSpace)/res_size);
+      ast->print_cr("    not entrant  = " SIZE_FORMAT_W(8) "k, nBlocks_notentr  = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", notentrSpace/(size_t)K,  nBlocks_notentr,  (100.0*notentrSpace)/size,  (100.0*notentrSpace)/res_size);
+      ast->print_cr("  unloadedSpace  = " SIZE_FORMAT_W(8) "k, nBlocks_unloaded = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", unloadedSpace/(size_t)K, nBlocks_unloaded, (100.0*unloadedSpace)/size, (100.0*unloadedSpace)/res_size);
+      ast->print_cr("  deadSpace      = " SIZE_FORMAT_W(8) "k, nBlocks_dead     = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", deadSpace/(size_t)K,     nBlocks_dead,     (100.0*deadSpace)/size,     (100.0*deadSpace)/res_size);
+      ast->print_cr("  stubSpace      = " SIZE_FORMAT_W(8) "k, nBlocks_stub     = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", stubSpace/(size_t)K,     nBlocks_stub,     (100.0*stubSpace)/size,     (100.0*stubSpace)/res_size);
+      ast->print_cr("ZombieBlocks     = %8d. These are HeapBlocks which could not be identified as CodeBlobs.", nBlocks_zomb);
+      ast->print_cr("latest allocated compilation id = %d", latest_compilation_id);
+      ast->print_cr("highest observed compilation id = %d", highest_compilation_id);
+      ast->print_cr("Building TopSizeList iterations = %ld", total_iterations);
+      ast->cr();
+
+      int             reset_val = NMethodSweeper::hotness_counter_reset_val();
+      double reverse_free_ratio = (res_size > size) ? (double)res_size/(double)(res_size-size) : (double)res_size;
+      printBox(ast, '-', "Method hotness information at time of this analysis", NULL);
+      ast->print_cr("Highest possible method temperature:          %12d", reset_val);
+      ast->print_cr("Threshold for method to be considered 'cold': %12.3f", -reset_val + reverse_free_ratio * NmethodSweepActivity);
+      ast->print_cr("min. hotness = %6d", minTemp);
+      ast->print_cr("avg. hotness = %6d", avgTemp);
+      ast->print_cr("max. hotness = %6d", maxTemp);
+      STRINGSTREAM_FLUSH("\n")
+
+      // This loop is intentionally printing directly to "out".
+      out->print("Verifying collected data...");
+      size_t granule_segs = granule_size>>log2_seg_size;
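+      // granule_segs is the number of segments per granule; no per-granule count
+      // or space tally can legitimately exceed it (e.g. 512K granules of 64-byte
+      // segments allow at most 8192 segments per granule).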
+      for (unsigned int ix = 0; ix < granules; ix++) {
+        if (StatArray[ix].t1_count   > granule_segs) {
+          out->print_cr("t1_count[%d]   = %d", ix, StatArray[ix].t1_count);
+        }
+        if (StatArray[ix].t2_count   > granule_segs) {
+          out->print_cr("t2_count[%d]   = %d", ix, StatArray[ix].t2_count);
+        }
+        if (StatArray[ix].stub_count > granule_segs) {
+          out->print_cr("stub_count[%d] = %d", ix, StatArray[ix].stub_count);
+        }
+        if (StatArray[ix].dead_count > granule_segs) {
+          out->print_cr("dead_count[%d] = %d", ix, StatArray[ix].dead_count);
+        }
+        if (StatArray[ix].t1_space   > granule_segs) {
+          out->print_cr("t1_space[%d]   = %d", ix, StatArray[ix].t1_space);
+        }
+        if (StatArray[ix].t2_space   > granule_segs) {
+          out->print_cr("t2_space[%d]   = %d", ix, StatArray[ix].t2_space);
+        }
+        if (StatArray[ix].stub_space > granule_segs) {
+          out->print_cr("stub_space[%d] = %d", ix, StatArray[ix].stub_space);
+        }
+        if (StatArray[ix].dead_space > granule_segs) {
+          out->print_cr("dead_space[%d] = %d", ix, StatArray[ix].dead_space);
+        }
+        //   this cast is awful! I need it because NT/Intel reports a signed/unsigned mismatch.
+        if ((size_t)(StatArray[ix].t1_count+StatArray[ix].t2_count+StatArray[ix].stub_count+StatArray[ix].dead_count) > granule_segs) {
+          out->print_cr("t1_count[%d] = %d, t2_count[%d] = %d, stub_count[%d] = %d", ix, StatArray[ix].t1_count, ix, StatArray[ix].t2_count, ix, StatArray[ix].stub_count);
+        }
+        if ((size_t)(StatArray[ix].t1_space+StatArray[ix].t2_space+StatArray[ix].stub_space+StatArray[ix].dead_space) > granule_segs) {
+          out->print_cr("t1_space[%d] = %d, t2_space[%d] = %d, stub_space[%d] = %d", ix, StatArray[ix].t1_space, ix, StatArray[ix].t2_space, ix, StatArray[ix].stub_space);
+        }
+      }
+
+      // This loop is intentionally printing directly to "out".
+      if (used_topSizeBlocks > 0) {
+        unsigned int j = 0;
+        if (TopSizeArray[0].len != currMax) {
+          out->print_cr("currMax(%d) differs from TopSizeArray[0].len(%d)", currMax, TopSizeArray[0].len);
+        }
+        for (unsigned int i = 0; (TopSizeArray[i].index != tsbStopper) && (j++ < alloc_topSizeBlocks); i = TopSizeArray[i].index) {
+          if (TopSizeArray[i].len < TopSizeArray[TopSizeArray[i].index].len) {
+            out->print_cr("sort error at index %d: %d !>= %d", i, TopSizeArray[i].len, TopSizeArray[TopSizeArray[i].index].len);
+          }
+        }
+        if (j >= alloc_topSizeBlocks) {
+          out->print_cr("Possible loop in TopSizeArray chaining!\n  allocBlocks = %d, usedBlocks = %d", alloc_topSizeBlocks, used_topSizeBlocks);
+          for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
+            out->print_cr("  TopSizeArray[%d].index = %d, len = %d", i, TopSizeArray[i].index, TopSizeArray[i].len);
+          }
+        }
+      }
+      out->print_cr("...done\n\n");
+    } else {
+      // Insane heap state detected. Analysis data incomplete. Just throw it away.
+      discard_StatArray(out);
+      discard_TopSizeArray(out);
+    }
+  }
+
+
+  done        = false;
+  while (!done && (nBlocks_free > 0)) {
+
+    printBox(ast, '=', "C O D E   H E A P   A N A L Y S I S   (free blocks) for segment ", heapName);
+    ast->print_cr("   The aggregate step collects information about all free blocks in CodeHeap.\n"
+                  "   Subsequent print functions create their output based on this snapshot.\n");
+    ast->print_cr("   Free space in %s is distributed over %d free blocks.", heapName, nBlocks_free);
+    ast->print_cr("   Each free block takes " SIZE_FORMAT " bytes of C heap for statistics data, that is " SIZE_FORMAT "K in total.", sizeof(FreeBlk), (sizeof(FreeBlk)*nBlocks_free)/K);
+    STRINGSTREAM_FLUSH("\n")
+
+    //----------------------------------------
+    //--  Prepare the FreeArray of FreeBlks --
+    //----------------------------------------
+
+    //---< discard old array if size does not match  >---
+    if (nBlocks_free != alloc_freeBlocks) {
+      discard_FreeArray(out);
+    }
+
+    prepare_FreeArray(out, nBlocks_free, heapName);
+    if (FreeArray == NULL) {
+      done = true;
+      continue;
+    }
+
+    //----------------------------------------
+    //--  Collect all FreeBlks in FreeArray --
+    //----------------------------------------
+
+    unsigned int ix = 0;
+    FreeBlock* cur  = heap->freelist();
+
+    while (cur != NULL) {
+      if (ix < alloc_freeBlocks) { // don't index out of bounds if _freelist has more blocks than anticipated
+        FreeArray[ix].start = cur;
+        FreeArray[ix].len   = (unsigned int)(cur->length()<<log2_seg_size);
+        FreeArray[ix].index = ix;
+      }
+      cur  = cur->link();
+      ix++;
+    }
+    if (ix != alloc_freeBlocks) {
+      ast->print_cr("Free block count mismatch. Expected %d free blocks, but found %d.", alloc_freeBlocks, ix);
+      ast->print_cr("I will update the counter and retry data collection");
+      STRINGSTREAM_FLUSH("\n")
+      nBlocks_free = ix;
+      continue;
+    }
+    done = true;
+  }
+
+  if (!done || (nBlocks_free == 0)) {
+    if (nBlocks_free == 0) {
+      printBox(ast, '-', "no free blocks found in", heapName);
+    } else if (!done) {
+      ast->print_cr("Free block count mismatch could not be resolved.");
+      ast->print_cr("Try to run \"aggregate\" function to update counters");
+    }
+    STRINGSTREAM_FLUSH("")
+
+    //---< discard old array and update global values  >---
+    discard_FreeArray(out);
+    set_HeapStatGlobals(out, heapName);
+    return;
+  }
+
+  //---<  calculate and fill remaining fields  >---
+  if (FreeArray != NULL) {
+    // This loop is intentionally printing directly to "out".
+    for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
+      size_t lenSum = 0;
+      FreeArray[ix].gap = (unsigned int)((address)FreeArray[ix+1].start - ((address)FreeArray[ix].start + FreeArray[ix].len));
+      for (HeapBlock *h = heap->next_block(FreeArray[ix].start); (h != NULL) && (h != FreeArray[ix+1].start); h = heap->next_block(h)) {
+        CodeBlob *cb  = (CodeBlob*)(heap->find_start(h));
+        if ((cb != NULL) && !cb->is_nmethod()) {
+          FreeArray[ix].stubs_in_gap = true;
+        }
+        FreeArray[ix].n_gapBlocks++;
+        lenSum += h->length()<<log2_seg_size;
+        if (((address)h < ((address)FreeArray[ix].start+FreeArray[ix].len)) || (h >= FreeArray[ix+1].start)) {
+          out->print_cr("unsorted occupied CodeHeap block found @ %p, gap interval [%p, %p)", h, (address)FreeArray[ix].start+FreeArray[ix].len, FreeArray[ix+1].start);
+        }
+      }
+      if (lenSum != FreeArray[ix].gap) {
+        out->print_cr("Length mismatch for gap between FreeBlk[%d] and FreeBlk[%d]. Calculated: %d, accumulated: %d.", ix, ix+1, FreeArray[ix].gap, (unsigned int)lenSum);
+      }
+    }
+  }
+  set_HeapStatGlobals(out, heapName);
+
+  printBox(ast, '=', "C O D E   H E A P   A N A L Y S I S   C O M P L E T E   for segment ", heapName);
+  STRINGSTREAM_FLUSH("\n")
+}
+
+
+void CodeHeapState::print_usedSpace(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (TopSizeArray == NULL) || (used_topSizeBlocks == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  {
+    printBox(ast, '=', "U S E D   S P A C E   S T A T I S T I C S   for ", heapName);
+    ast->print_cr("Note: The Top%d list of the largest used blocks associates method names\n"
+                  "      and other identifying information with the block size data.\n"
+                  "\n"
+                  "      Method names are dynamically retrieved from the code cache at print time.\n"
+                  "      Due to the living nature of the code cache and because the CodeCache_lock\n"
+                  "      is not continuously held, the displayed name might be wrong or no name\n"
+                  "      might be found at all. The likelihood for that to happen increases\n"
+                  "      over time passed between analysis and print step.\n", used_topSizeBlocks);
+    STRINGSTREAM_FLUSH_LOCKED("\n")
+  }
+
+  //----------------------------
+  //--  Print Top Used Blocks --
+  //----------------------------
+  {
+    char*     low_bound = heap->low_boundary();
+
+    printBox(ast, '-', "Largest Used Blocks in ", heapName);
+    print_blobType_legend(ast);
+
+    ast->fill_to(51);
+    ast->print("%4s", "blob");
+    ast->fill_to(56);
+    ast->print("%9s", "compiler");
+    ast->fill_to(66);
+    ast->print_cr("%6s", "method");
+    ast->print_cr("%18s %13s %17s %4s %9s  %5s %s",      "Addr(module)      ", "offset", "size", "type", " type lvl", " temp", "Name");
+    STRINGSTREAM_FLUSH_LOCKED("")
+
+    //---<  print Top Ten Used Blocks  >---
+    if (used_topSizeBlocks > 0) {
+      unsigned int printed_topSizeBlocks = 0;
+      for (unsigned int i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
+        printed_topSizeBlocks++;
+        CodeBlob*   this_blob = (CodeBlob*)(heap->find_start(TopSizeArray[i].start));
+        nmethod*           nm = NULL;
+        const char* blob_name = "unnamed blob";
+        if (this_blob != NULL) {
+          blob_name = this_blob->name();
+          nm        = this_blob->as_nmethod_or_null();
+          //---<  blob address  >---
+          ast->print("%p", this_blob);
+          ast->fill_to(19);
+          //---<  blob offset from CodeHeap begin  >---
+          ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
+          ast->fill_to(33);
+        } else {
+          //---<  block address  >---
+          ast->print("%p", TopSizeArray[i].start);
+          ast->fill_to(19);
+          //---<  block offset from CodeHeap begin  >---
+          ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)TopSizeArray[i].start-low_bound));
+          ast->fill_to(33);
+        }
+
+
+        //---<  print size, name, and signature (for nMethods)  >---
+        if ((nm != NULL) && (nm->method() != NULL)) {
+          ResourceMark rm;
+          //---<  nMethod size in hex  >---
+          unsigned int total_size = nm->total_size();
+          ast->print(PTR32_FORMAT, total_size);
+          ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
+          ast->fill_to(51);
+          ast->print("  %c", blobTypeChar[TopSizeArray[i].type]);
+          //---<  compiler information  >---
+          ast->fill_to(56);
+          ast->print("%5s %3d", compTypeName[TopSizeArray[i].compiler], TopSizeArray[i].level);
+          //---<  method temperature  >---
+          ast->fill_to(67);
+          ast->print("%5d", nm->hotness_counter());
+          //---<  name and signature  >---
+          ast->fill_to(67+6);
+          if (nm->is_in_use())      {blob_name = nm->method()->name_and_sig_as_C_string(); }
+          if (nm->is_not_entrant()) {blob_name = nm->method()->name_and_sig_as_C_string(); }
+          if (nm->is_zombie())      {ast->print("%14s", " zombie method"); }
+          ast->print("%s", blob_name);
+        } else {
+          //---<  block size in hex  >---
+          ast->print(PTR32_FORMAT, (unsigned int)(TopSizeArray[i].len<<log2_seg_size));
+          ast->print("(" SIZE_FORMAT_W(4) "K)", (TopSizeArray[i].len<<log2_seg_size)/K);
+          //---<  no compiler information  >---
+          ast->fill_to(56);
+          //---<  name and signature  >---
+          ast->fill_to(67+6);
+          ast->print("%s", blob_name);
+        }
+        STRINGSTREAM_FLUSH_LOCKED("\n")
+      }
+      if (used_topSizeBlocks != printed_topSizeBlocks) {
+        ast->print_cr("used blocks: %d, printed blocks: %d", used_topSizeBlocks, printed_topSizeBlocks);
+        STRINGSTREAM_FLUSH("")
+        for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
+          ast->print_cr("  TopSizeArray[%d].index = %d, len = %d", i, TopSizeArray[i].index, TopSizeArray[i].len);
+          STRINGSTREAM_FLUSH("")
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("\n\n")
+    }
+  }
+
+  //-----------------------------
+  //--  Print Usage Histogram  --
+  //-----------------------------
+
+  if (SizeDistributionArray != NULL) {
+    unsigned long total_count = 0;
+    unsigned long total_size  = 0;
+    const unsigned long pctFactor = 200;
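+    // pctFactor = 200 draws two characters per percentage point: a range holding
+    // 25% of all blocks becomes a 50-character bar, with every 20th character
+    // printed as a digit to serve as a 10%-ruler mark.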
+
+    for (unsigned int i = 0; i < nSizeDistElements; i++) {
+      total_count += SizeDistributionArray[i].count;
+      total_size  += SizeDistributionArray[i].lenSum;
+    }
+
+    if ((total_count > 0) && (total_size > 0)) {
+      printBox(ast, '-', "Block count histogram for ", heapName);
+      ast->print_cr("Note: The histogram indicates how many blocks (as a percentage\n"
+                    "      of all blocks) have a size in the given range.\n"
+                    "      %ld characters are printed per percentage point.\n", pctFactor/100);
+      ast->print_cr("total size   of all blocks: %7ldM", (total_size<<log2_seg_size)/M);
+      ast->print_cr("total number of all blocks: %7ld\n", total_count);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      ast->print_cr("[Size Range)------avg.-size-+----count-+");
+      for (unsigned int i = 0; i < nSizeDistElements; i++) {
+        if (SizeDistributionArray[i].rangeStart<<log2_seg_size < K) {
+          ast->print("[" SIZE_FORMAT_W(5) " .." SIZE_FORMAT_W(5) " ): "
+                    ,(size_t)(SizeDistributionArray[i].rangeStart<<log2_seg_size)
+                    ,(size_t)(SizeDistributionArray[i].rangeEnd<<log2_seg_size)
+                    );
+        } else if (SizeDistributionArray[i].rangeStart<<log2_seg_size < M) {
+          ast->print("[" SIZE_FORMAT_W(5) "K.." SIZE_FORMAT_W(5) "K): "
+                    ,(SizeDistributionArray[i].rangeStart<<log2_seg_size)/K
+                    ,(SizeDistributionArray[i].rangeEnd<<log2_seg_size)/K
+                    );
+        } else {
+          ast->print("[" SIZE_FORMAT_W(5) "M.." SIZE_FORMAT_W(5) "M): "
+                    ,(SizeDistributionArray[i].rangeStart<<log2_seg_size)/M
+                    ,(SizeDistributionArray[i].rangeEnd<<log2_seg_size)/M
+                    );
+        }
+        ast->print(" %8d | %8d |",
+                   SizeDistributionArray[i].count > 0 ? (SizeDistributionArray[i].lenSum<<log2_seg_size)/SizeDistributionArray[i].count : 0,
+                   SizeDistributionArray[i].count);
+
+        unsigned int percent = pctFactor*SizeDistributionArray[i].count/total_count;
+        for (unsigned int j = 1; j <= percent; j++) {
+          ast->print("%c", (j%((pctFactor/100)*10) == 0) ? ('0'+j/(((unsigned int)pctFactor/100)*10)) : '*');
+        }
+        ast->cr();
+      }
+      ast->print_cr("----------------------------+----------+\n\n");
+      STRINGSTREAM_FLUSH_LOCKED("\n")
+
+      printBox(ast, '-', "Contribution per size range to total size for ", heapName);
+      ast->print_cr("Note: The histogram indicates how much space (as a percentage of all\n"
+                    "      occupied space) is used by the blocks in the given size range.\n"
+                    "      %ld characters are printed per percentage point.\n", pctFactor/100);
+      ast->print_cr("total size   of all blocks: %7ldM", (total_size<<log2_seg_size)/M);
+      ast->print_cr("total number of all blocks: %7ld\n", total_count);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      ast->print_cr("[Size Range)------avg.-size-+----count-+");
+      for (unsigned int i = 0; i < nSizeDistElements; i++) {
+        if (SizeDistributionArray[i].rangeStart<<log2_seg_size < K) {
+          ast->print("[" SIZE_FORMAT_W(5) " .." SIZE_FORMAT_W(5) " ): "
+                    ,(size_t)(SizeDistributionArray[i].rangeStart<<log2_seg_size)
+                    ,(size_t)(SizeDistributionArray[i].rangeEnd<<log2_seg_size)
+                    );
+        } else if (SizeDistributionArray[i].rangeStart<<log2_seg_size < M) {
+          ast->print("[" SIZE_FORMAT_W(5) "K.." SIZE_FORMAT_W(5) "K): "
+                    ,(SizeDistributionArray[i].rangeStart<<log2_seg_size)/K
+                    ,(SizeDistributionArray[i].rangeEnd<<log2_seg_size)/K
+                    );
+        } else {
+          ast->print("[" SIZE_FORMAT_W(5) "M.." SIZE_FORMAT_W(5) "M): "
+                    ,(SizeDistributionArray[i].rangeStart<<log2_seg_size)/M
+                    ,(SizeDistributionArray[i].rangeEnd<<log2_seg_size)/M
+                    );
+        }
+        ast->print(" %8d | %8d |",
+                   SizeDistributionArray[i].count > 0 ? (SizeDistributionArray[i].lenSum<<log2_seg_size)/SizeDistributionArray[i].count : 0,
+                   SizeDistributionArray[i].count);
+
+        unsigned int percent = pctFactor*(unsigned long)SizeDistributionArray[i].lenSum/total_size;
+        for (unsigned int j = 1; j <= percent; j++) {
+          ast->print("%c", (j%((pctFactor/100)*10) == 0) ? ('0'+j/(((unsigned int)pctFactor/100)*10)) : '*');
+        }
+        ast->cr();
+      }
+      ast->print_cr("----------------------------+----------+");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+}
+
+
+void CodeHeapState::print_freeSpace(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (FreeArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  {
+    printBox(ast, '=', "F R E E   S P A C E   S T A T I S T I C S   for ", heapName);
+    ast->print_cr("Note: in this context, a gap is the occupied space between two free blocks.\n"
+                  "      Those gaps are of interest if there is a chance that they become\n"
+                  "      unoccupied, e.g. by class unloading. Then, the two adjacent free\n"
+                  "      blocks, together with the now unoccupied space, form a new, large\n"
+                  "      free block.");
+    STRINGSTREAM_FLUSH_LOCKED("\n")
+  }
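+
+  // Illustrative FreeBlk contents (values assumed): two free blocks separated by
+  // one occupied block:
+  //   FreeArray[0]: start=0x8000, len=0x400, gap=0x200, n_gapBlocks=1
+  //   FreeArray[1]: start=0x8600, len=0x800
+  // gap = FreeArray[1].start - (FreeArray[0].start + FreeArray[0].len) = 0x200.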
+
+  {
+    printBox(ast, '-', "List of all Free Blocks in ", heapName);
+    STRINGSTREAM_FLUSH_LOCKED("")
+
+    unsigned int ix = 0;
+    for (ix = 0; ix < alloc_freeBlocks-1; ix++) {
+      ast->print("%p: Len[%4d] = " HEX32_FORMAT ",", FreeArray[ix].start, ix, FreeArray[ix].len);
+      ast->fill_to(38);
+      ast->print("Gap[%4d..%4d]: " HEX32_FORMAT " bytes,", ix, ix+1, FreeArray[ix].gap);
+      ast->fill_to(71);
+      ast->print("block count: %6d", FreeArray[ix].n_gapBlocks);
+      if (FreeArray[ix].stubs_in_gap) {
+        ast->print(" !! permanent gap, contains stubs and/or blobs !!");
+      }
+      STRINGSTREAM_FLUSH_LOCKED("\n")
+    }
+    ast->print_cr("%p: Len[%4d] = " HEX32_FORMAT, FreeArray[ix].start, ix, FreeArray[ix].len);
+    STRINGSTREAM_FLUSH_LOCKED("\n\n")
+  }
+
+
+  //-----------------------------------------
+  //--  Find and Print Top Ten Free Blocks --
+  //-----------------------------------------
+
+  //---<  find Top Ten Free Blocks  >---
+  const unsigned int nTop = 10;
+  unsigned int  currMax10 = 0;
+  struct FreeBlk* FreeTopTen[nTop];
+  memset(FreeTopTen, 0, sizeof(FreeTopTen));
+
+  for (unsigned int ix = 0; ix < alloc_freeBlocks; ix++) {
+    if (FreeArray[ix].len > currMax10) {  // larger than the ten largest found so far
+      unsigned int currSize = FreeArray[ix].len;
+
+      unsigned int iy;
+      for (iy = 0; iy < nTop && FreeTopTen[iy] != NULL; iy++) {
+        if (FreeTopTen[iy]->len < currSize) {
+          for (unsigned int iz = nTop-1; iz > iy; iz--) { // make room to insert new free block
+            FreeTopTen[iz] = FreeTopTen[iz-1];
+          }
+          FreeTopTen[iy] = &FreeArray[ix];        // insert new free block
+          if (FreeTopTen[nTop-1] != NULL) {
+            currMax10 = FreeTopTen[nTop-1]->len;
+          }
+          break; // done with this, check next free block
+        }
+      }
+      if (iy >= nTop) {
+        ast->print_cr("Internal logic error. New Max10 = %d detected, but could not be merged. Old Max10 = %d",
+                      currSize, currMax10);
+        continue;
+      }
+      if (FreeTopTen[iy] == NULL) {
+        FreeTopTen[iy] = &FreeArray[ix];
+        if (iy == (nTop-1)) {
+          currMax10 = currSize;
+        }
+      }
+    }
+  }
+  STRINGSTREAM_FLUSH_LOCKED("")
+
+  {
+    printBox(ast, '-', "Top Ten Free Blocks in ", heapName);
+
+    //---<  print Top Ten Free Blocks  >---
+    for (unsigned int iy = 0; (iy < nTop) && (FreeTopTen[iy] != NULL); iy++) {
+      ast->print("Pos %3d: Block %4d - size " HEX32_FORMAT ",", iy+1, FreeTopTen[iy]->index, FreeTopTen[iy]->len);
+      ast->fill_to(39);
+      if (FreeTopTen[iy]->index == (alloc_freeBlocks-1)) {
+        ast->print("last free block in list.");
+      } else {
+        ast->print("Gap (to next) " HEX32_FORMAT ",", FreeTopTen[iy]->gap);
+        ast->fill_to(63);
+        ast->print("#blocks (in gap) %d", FreeTopTen[iy]->n_gapBlocks);
+      }
+      ast->cr();
+    }
+    STRINGSTREAM_FLUSH_LOCKED("\n\n")
+  }
+
+
+  //--------------------------------------------------------
+  //--  Find and Print Top Ten Free-Occupied-Free Triples --
+  //--------------------------------------------------------
+
+  //---<  find and print Top Ten Triples (Free-Occupied-Free)  >---
+  currMax10 = 0;
+  struct FreeBlk* FreeTopTenTriple[nTop];
+  memset(FreeTopTenTriple, 0, sizeof(FreeTopTenTriple));
+
+  for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
+    // If there are stubs in the gap, this gap will never become completely free.
+    // The triple will thus never merge to one free block.
+    unsigned int lenTriple  = FreeArray[ix].len + (FreeArray[ix].stubs_in_gap ? 0 : FreeArray[ix].gap + FreeArray[ix+1].len);
+    FreeArray[ix].len = lenTriple;
+    if (lenTriple > currMax10) {  // larger than the ten largest found so far
+
+      unsigned int iy;
+      for (iy = 0; (iy < nTop) && (FreeTopTenTriple[iy] != NULL); iy++) {
+        if (FreeTopTenTriple[iy]->len < lenTriple) {
+          for (unsigned int iz = nTop-1; iz > iy; iz--) {
+            FreeTopTenTriple[iz] = FreeTopTenTriple[iz-1];
+          }
+          FreeTopTenTriple[iy] = &FreeArray[ix];
+          if (FreeTopTenTriple[nTop-1] != NULL) {
+            currMax10 = FreeTopTenTriple[nTop-1]->len;
+          }
+          break;
+        }
+      }
+      if (iy == nTop) {
+        ast->print_cr("Internal logic error. New Max10 = %d detected, but could not be merged. Old Max10 = %d",
+                      lenTriple, currMax10);
+        continue;
+      }
+      if (FreeTopTenTriple[iy] == NULL) {
+        FreeTopTenTriple[iy] = &FreeArray[ix];
+        if (iy == (nTop-1)) {
+          currMax10 = lenTriple;
+        }
+      }
+    }
+  }
+  STRINGSTREAM_FLUSH_LOCKED("")
+
+  {
+    printBox(ast, '-', "Top Ten Free-Occupied-Free Triples in ", heapName);
+    ast->print_cr("  Use this information to judge how likely it is that a large(r) free block\n"
+                  "  might get created by code cache sweeping.\n"
+                  "  If all the occupied blocks can be swept, the three free blocks will be\n"
+                  "  merged into one (much larger) free block. That would reduce free space\n"
+                  "  fragmentation.\n");
+
+    //---<  print Top Ten Free-Occupied-Free Triples  >---
+    for (unsigned int iy = 0; (iy < nTop) && (FreeTopTenTriple[iy] != NULL); iy++) {
+      ast->print("Pos %3d: Block %4d - size " HEX32_FORMAT ",", iy+1, FreeTopTenTriple[iy]->index, FreeTopTenTriple[iy]->len);
+      ast->fill_to(39);
+      ast->print("Gap (to next) " HEX32_FORMAT ",", FreeTopTenTriple[iy]->gap);
+      ast->fill_to(63);
+      ast->print("#blocks (in gap) %d", FreeTopTenTriple[iy]->n_gapBlocks);
+      ast->cr();
+    }
+    STRINGSTREAM_FLUSH_LOCKED("\n\n")
+  }
+}
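The FreeTopTen and FreeTopTenTriple loops above both use the same bounded insertion technique: an array of at most nTop pointers kept sorted by descending length, where smaller entries are shifted down on insert and the smallest retained length (currMax10) serves as a cheap admission filter once the array is full. A minimal stand-alone sketch of that technique, with illustrative names that are not part of the patch:

    struct Blk { unsigned int len; };

    static const unsigned int nTop = 10;    // size of the "top ten" array

    // Insert candidate into a descending-sorted array of at most nTop entries.
    void insert_topN(Blk* top[], Blk* candidate) {
      for (unsigned int i = 0; i < nTop; i++) {
        if (top[i] == nullptr) {            // empty slot: append and stop
          top[i] = candidate;
          return;
        }
        if (top[i]->len < candidate->len) { // shift smaller entries down...
          for (unsigned int j = nTop - 1; j > i; j--) {
            top[j] = top[j - 1];
          }
          top[i] = candidate;               // ...and insert at position i
          return;
        }
      }
      // candidate is not larger than any retained entry: drop it
    }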
+
+
+void CodeHeapState::print_count(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  unsigned int granules_per_line = 32;
+  char*        low_bound         = heap->low_boundary();
+
+  {
+    printBox(ast, '=', "B L O C K   C O U N T S   for ", heapName);
+    ast->print_cr("  Each granule contains an individual number of heap blocks. Large blocks\n"
+                  "  may span multiple granules and are counted for each granule they touch.\n");
+    if (segment_granules) {
+      ast->print_cr("  You have selected granule size to be as small as segment size.\n"
+                    "  As a result, each granule contains exactly one block (or a part of one block)\n"
+                    "  or is displayed as empty (' ') if it's BlobType does not match the selection.\n"
+                    "  Occupied granules show their BlobType character, see legend.\n");
+      print_blobType_legend(ast);
+    }
+    STRINGSTREAM_FLUSH_LOCKED("")
+  }
+
+  {
+    if (segment_granules) {
+      printBox(ast, '-', "Total (all types) count for granule size == segment size", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_blobType_single(ast, StatArray[ix].type);
+      }
+    } else {
+      printBox(ast, '-', "Total (all tiers) count, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        unsigned int count = StatArray[ix].t1_count   + StatArray[ix].t2_count   + StatArray[ix].tx_count
+                           + StatArray[ix].stub_count + StatArray[ix].dead_count;
+        print_count_single(ast, count);
+      }
+    }
+    STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+  }
+
+  {
+    if (nBlocks_t1 > 0) {
+      printBox(ast, '-', "Tier1 nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].t1_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].t1_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier1 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_t2 > 0) {
+      printBox(ast, '-', "Tier2 nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].t2_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].t2_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_alive > 0) {
+      printBox(ast, '-', "not_used/not_entrant nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].tx_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].tx_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No not_used/not_entrant nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_stub > 0) {
+      printBox(ast, '-', "Stub & Blob count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].stub_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].stub_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Stubs and Blobs found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_dead > 0) {
+      printBox(ast, '-', "Dead nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].dead_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].dead_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No dead nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (!segment_granules) { // Prevent totally redundant printouts
+      printBox(ast, '-', "Count by tier (combined, no dead blocks): <#t1>:<#t2>:<#s>, 0x0..0xf. '*' indicates >= 16 blocks", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 24;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+
+        print_count_single(ast, StatArray[ix].t1_count);
+        ast->print(":");
+        print_count_single(ast, StatArray[ix].t2_count);
+        ast->print(":");
+        if (segment_granules && StatArray[ix].stub_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].stub_count);
+        }
+        ast->print(" ");
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    }
+  }
+}
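Each of the count maps above funnels through print_count_single (defined later in this file), which compresses a block count into one character: a hex digit for 1..15, '*' when saturated at 16 or more, and a blank for an empty granule. A stand-alone sketch of that encoding, written as a free function for illustration only:

    char count_to_char(unsigned short count) {
      if (count >= 16) return '*';                        // saturate at 16 or more blocks
      if (count >  0)  return "0123456789abcdef"[count];  // one hex digit, 0x1..0xf
      return ' ';                                         // empty granule
    }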
+
+
+void CodeHeapState::print_space(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  unsigned int granules_per_line = 32;
+  char*        low_bound         = heap->low_boundary();
+
+  {
+    printBox(ast, '=', "S P A C E   U S A G E  &  F R A G M E N T A T I O N   for ", heapName);
+    ast->print_cr("  The heap space covered by one granule is occupied to a various extend.\n"
+                  "  The granule occupancy is displayed by one decimal digit per granule.\n");
+    if (segment_granules) {
+      ast->print_cr("  You have selected granule size to be as small as segment size.\n"
+                    "  As a result, each granule contains exactly one block (or a part of one block)\n"
+                    "  or is displayed as empty (' ') if it's BlobType does not match the selection.\n"
+                    "  Occupied granules show their BlobType character, see legend.\n");
+      print_blobType_legend(ast);
+    } else {
+      ast->print_cr("  These digits represent a fill percentage range (see legend).\n");
+      print_space_legend(ast);
+    }
+    STRINGSTREAM_FLUSH_LOCKED("")
+  }
+
+  {
+    if (segment_granules) {
+      printBox(ast, '-', "Total (all types) space consumption for granule size == segment size", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_blobType_single(ast, StatArray[ix].type);
+      }
+    } else {
+      printBox(ast, '-', "Total (all types) space consumption. ' ' indicates empty, '*' indicates full.", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        unsigned int space    = StatArray[ix].t1_space   + StatArray[ix].t2_space  + StatArray[ix].tx_space
+                              + StatArray[ix].stub_space + StatArray[ix].dead_space;
+        print_space_single(ast, space);
+      }
+    }
+    STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+  }
+
+  {
+    if (nBlocks_t1 > 0) {
+      printBox(ast, '-', "Tier1 space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].t1_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].t1_space);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier1 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_t2 > 0) {
+      printBox(ast, '-', "Tier2 space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].t2_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].t2_space);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_alive > 0) {
+      printBox(ast, '-', "not_used/not_entrant space consumption. ' ' indicates empty, '*' indicates full", NULL);
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].tx_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].tx_space);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_stub > 0) {
+      printBox(ast, '-', "Stub and Blob space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].stub_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].stub_space);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Stubs and Blobs found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_dead > 0) {
+      printBox(ast, '-', "Dead space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_space_single(ast, StatArray[ix].dead_space);
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No dead nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (!segment_granules) { // Prevent totally redundant printouts
+      printBox(ast, '-', "Space consumption by tier (combined): <t1%>:<t2%>:<s%>. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 24;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+
+        if (segment_granules && StatArray[ix].t1_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].t1_space);
+        }
+        ast->print(":");
+        if (segment_granules && StatArray[ix].t2_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].t2_space);
+        }
+        ast->print(":");
+        if (segment_granules && StatArray[ix].stub_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].stub_space);
+        }
+        ast->print(" ");
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    }
+  }
+}
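The occupancy digits printed above come from print_space_single (defined later in this file): the space counter, kept in segment-size units, is converted back to bytes and mapped onto a 10%-bucket digit, with ' ' for empty and '*' for a (nearly) full granule. A sketch with the sizes passed in explicitly, for illustration only:

    #include <cstddef>

    char occupancy_char(unsigned short space_segs, int log2_seg_size, size_t granule_size) {
      size_t bytes = ((size_t)space_segs) << log2_seg_size;  // segment units -> bytes
      if (space_segs == 0)           return ' ';             // empty granule
      if (bytes >= granule_size - 1) return '*';             // (nearly) full granule
      return (char)('0' + 10 * bytes / granule_size);        // 10% buckets: '0'..'9'
    }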
+
+void CodeHeapState::print_age(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  unsigned int granules_per_line = 32;
+  char*        low_bound         = heap->low_boundary();
+
+  {
+    printBox(ast, '=', "M E T H O D   A G E   by CompileID for ", heapName);
+    ast->print_cr("  The age of a compiled method in the CodeHeap is not available as a\n"
+                  "  time stamp. Instead, a relative age is deducted from the method's compilation ID.\n"
+                  "  Age information is available for tier1 and tier2 methods only. There is no\n"
+                  "  age information for stubs and blobs, because they have no compilation ID assigned.\n"
+                  "  Information for the youngest method (highest ID) in the granule is printed.\n"
+                  "  Refer to the legend to learn how method age is mapped to the displayed digit.");
+    print_age_legend(ast);
+    STRINGSTREAM_FLUSH_LOCKED("")
+  }
+
+  {
+    printBox(ast, '-', "Age distribution. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+    STRINGSTREAM_FLUSH_LOCKED("")
+
+    granules_per_line = 128;
+    for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+      print_line_delim(out, ast, low_bound, ix, granules_per_line);
+      unsigned int age1      = StatArray[ix].t1_age;
+      unsigned int age2      = StatArray[ix].t2_age;
+      unsigned int agex      = StatArray[ix].tx_age;
+      unsigned int age       = age1 > age2 ? age1 : age2;
+      age       = age > agex ? age : agex;
+      print_age_single(ast, age);
+    }
+    STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+  }
+
+  {
+    if (nBlocks_t1 > 0) {
+      printBox(ast, '-', "Tier1 age distribution. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_age_single(ast, StatArray[ix].t1_age);
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier1 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_t2 > 0) {
+      printBox(ast, '-', "Tier2 age distribution. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_age_single(ast, StatArray[ix].t2_age);
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_alive > 0) {
+      printBox(ast, '-', "not_used/not_entrant age distribution. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_age_single(ast, StatArray[ix].tx_age);
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (!segment_granules) { // Prevent totally redundant printouts
+      printBox(ast, '-', "age distribution by tier <a1>:<a2>. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 32;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_age_single(ast, StatArray[ix].t1_age);
+        ast->print(":");
+        print_age_single(ast, StatArray[ix].t2_age);
+        ast->print(" ");
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    }
+  }
+}
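The age digits above are produced by print_age_single (defined later in this file): the compile-ID range is repeatedly halved, so digit '0' covers the youngest 1/256 of IDs and '8' the oldest half, matching the box titles. A stand-alone sketch of that bucketing, for illustration only:

    char age_char(unsigned int age, unsigned int latest_id) {
      if (age == 0) return ' ';          // no age information available
      unsigned int indicator = 0;
      unsigned int age_range = 256;
      while (age_range > 0 && latest_id - age > latest_id / age_range) {
        age_range /= 2;                  // widen the bucket...
        indicator += 1;                  // ...and advance the digit
      }
      return (char)('0' + indicator);
    }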
+
+
+void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  unsigned int granules_per_line  = 128;
+  char*        low_bound          = heap->low_boundary();
+  CodeBlob*    last_blob          = NULL;
+  bool         name_in_addr_range = true;
+
+  //---<  print at least 128K per line  >---
+  if (granules_per_line*granule_size < 128*K) {
+    granules_per_line = (unsigned int)((128*K)/granule_size);
+  }
+
+  printBox(ast, '=', "M E T H O D   N A M E S   for ", heapName);
+  ast->print_cr("  Method names are dynamically retrieved from the code cache at print time.\n"
+                "  Due to the living nature of the code heap and because the CodeCache_lock\n"
+                "  is not continuously held, the displayed name might be wrong or no name\n"
+                "  might be found at all. The likelihood for that to happen increases\n"
+                "  over time passed between analysis and print step.\n");
+  STRINGSTREAM_FLUSH_LOCKED("")
+
+  for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+    //---<  print a new blob on a new line  >---
+    if (ix%granules_per_line == 0) {
+      if (!name_in_addr_range) {
+        ast->print_cr("No methods, blobs, or stubs found in this address range");
+      }
+      name_in_addr_range = false;
+
+      ast->cr();
+      ast->print_cr("--------------------------------------------------------------------");
+      ast->print_cr("Address range [%p,%p), " SIZE_FORMAT "k", low_bound+ix*granule_size, low_bound+(ix+granules_per_line)*granule_size, granules_per_line*granule_size/(size_t)K);
+      ast->print_cr("--------------------------------------------------------------------");
+      STRINGSTREAM_FLUSH_LOCKED("")
+    }
+    // Only check granule if it contains at least one blob.
+    unsigned int nBlobs  = StatArray[ix].t1_count   + StatArray[ix].t2_count + StatArray[ix].tx_count +
+                           StatArray[ix].stub_count + StatArray[ix].dead_count;
+    if (nBlobs > 0) {
+    for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
+      // heap->find_start() is safe: it only reads _segmap and returns NULL or a void*. The returned CodeBlob may be uninitialized.
+      CodeBlob* this_blob = (CodeBlob *)(heap->find_start(low_bound+ix*granule_size+is));
+      bool blob_initialized = (this_blob != NULL) &&
+                              ((char*)this_blob + this_blob->header_size() == (char*)(this_blob->relocation_begin())) &&
+                              ((char*)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (char*)(this_blob->content_begin()));
+      if (blob_initialized && (this_blob != last_blob)) {
+        if (!name_in_addr_range) {
+          name_in_addr_range = true;
+          ast->fill_to(51);
+          ast->print("%9s", "compiler");
+          ast->fill_to(61);
+          ast->print_cr("%6s", "method");
+          ast->print_cr("%18s %13s %17s %9s  %5s %18s  %s", "Addr(module)      ", "offset", "size", " type lvl", " temp", "blobType          ", "Name");
+        }
+
+        //---<  Print blobTypeName as recorded during analysis  >---
+        ast->print("%p", this_blob);
+        ast->fill_to(19);
+        ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
+        ast->fill_to(33);
+
+        //---<  print size, name, and signature (for nMethods)  >---
+        // this_blob->name() can return NULL if no name was given to the constructor. Inlined, maybe not visible on stack.
+        const char* blob_name = this_blob->name();
+        if (blob_name == NULL) {
+          blob_name = "<unavailable>";
+        }
+        // this_blob->as_nmethod_or_null() is safe. Inlined, maybe not visible on stack.
+        nmethod*           nm = this_blob->as_nmethod_or_null();
+        blobType       cbType = noType;
+        if (segment_granules) {
+          cbType = (blobType)StatArray[ix].type;
+        } else {
+          cbType = get_cbType(this_blob);
+        }
+        if ((nm != NULL) && (nm->method() != NULL)) {
+          ResourceMark rm;
+          //---<  nMethod size in hex  >---
+          unsigned int total_size = nm->total_size();
+          ast->print(PTR32_FORMAT, total_size);
+          ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
+          //---<  compiler information  >---
+          ast->fill_to(51);
+          ast->print("%5s %3d", compTypeName[StatArray[ix].compiler], StatArray[ix].level);
+          //---<  method temperature  >---
+          ast->fill_to(62);
+          ast->print("%5d", nm->hotness_counter());
+          //---<  name and signature  >---
+          ast->fill_to(62+6);
+          ast->print("%s", blobTypeName[cbType]);
+          ast->fill_to(82+6);
+          if (nm->is_in_use() || nm->is_not_entrant()) {
+            blob_name = nm->method()->name_and_sig_as_C_string();
+          }
+          if (nm->is_zombie()) {
+            ast->print("%14s", " zombie method");
+          }
+          ast->print("%s", blob_name);
+        } else {
+          ast->fill_to(62+6);
+          ast->print("%s", blobTypeName[cbType]);
+          ast->fill_to(82+6);
+          ast->print("%s", blob_name);
+        }
+        STRINGSTREAM_FLUSH_LOCKED("\n")
+        last_blob          = this_blob;
+      } else if (!blob_initialized && (this_blob != NULL)) {
+        last_blob          = this_blob;
+      }
+    }
+    }
+  }
+  STRINGSTREAM_FLUSH_LOCKED("\n\n")
+}
+
+
+void CodeHeapState::printBox(outputStream* ast, const char border, const char* text1, const char* text2) {
+  unsigned int lineLen = 1 + 2 + 2 + 1;
+  char edge, frame;
+
+  if (text1 != NULL) {
+    lineLen += (unsigned int)strlen(text1); // text1 is much shorter than MAX_INT chars.
+  }
+  if (text2 != NULL) {
+    lineLen += (unsigned int)strlen(text2); // text2 is much shorter than MAX_INT chars.
+  }
+  if (border == '-') {
+    edge  = '+';
+    frame = '|';
+  } else {
+    edge  = border;
+    frame = border;
+  }
+
+  ast->print("%c", edge);
+  for (unsigned int i = 0; i < lineLen-2; i++) {
+    ast->print("%c", border);
+  }
+  ast->print_cr("%c", edge);
+
+  ast->print("%c  ", frame);
+  if (text1 != NULL) {
+    ast->print("%s", text1);
+  }
+  if (text2 != NULL) {
+    ast->print("%s", text2);
+  }
+  ast->print_cr("  %c", frame);
+
+  ast->print("%c", edge);
+  for (unsigned int i = 0; i < lineLen-2; i++) {
+    ast->print("%c", border);
+  }
+  ast->print_cr("%c", edge);
+}
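printBox frames a one-line title with the given border character; a '-' border additionally gets '+' corners and '|' side frames. As a usage example, printBox(out, '-', "Hello ", "World") would emit:

    +---------------+
    |  Hello World  |
    +---------------+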
+
+void CodeHeapState::print_blobType_legend(outputStream* out) {
+  out->cr();
+  printBox(out, '-', "Block types used in the following CodeHeap dump", NULL);
+  for (int type = noType; type < lastType; type += 1) {
+    out->print_cr("  %c - %s", blobTypeChar[type], blobTypeName[type]);
+  }
+  out->print_cr("  -----------------------------------------------------");
+  out->cr();
+}
+
+void CodeHeapState::print_space_legend(outputStream* out) {
+  out->cr();
+  printBox(out, '-', "Space ranges, based on granule occupancy", NULL);
+  out->print_cr("    -   0%% == occupancy");
+  for (int i=0; i<=9; i++) {
+    out->print_cr("  %d - %3d%% < occupancy < %3d%%", i, 10*i, 10*(i+1));
+  }
+  out->print_cr("  * - 100%% == occupancy");
+  out->print_cr("  ----------------------------------------------");
+  out->cr();
+}
+
+void CodeHeapState::print_age_legend(outputStream* out) {
+  unsigned int indicator = 0;
+  unsigned int age_range = 256;
+  unsigned int range_beg = latest_compilation_id;
+  out->cr();
+  printBox(out, '-', "Age ranges, based on compilation id", NULL);
+  while (age_range > 0) {
+    out->print_cr("  %d - %6d to %6d", indicator, range_beg, latest_compilation_id - latest_compilation_id/age_range);
+    range_beg = latest_compilation_id - latest_compilation_id/age_range;
+    age_range /= 2;
+    indicator += 1;
+  }
+  out->print_cr("  -----------------------------------------");
+  out->cr();
+}
+
+void CodeHeapState::print_blobType_single(outputStream* out, u2 /* blobType */ type) {
+  out->print("%c", blobTypeChar[type]);
+}
+
+void CodeHeapState::print_count_single(outputStream* out, unsigned short count) {
+  if (count >= 16)    out->print("*");
+  else if (count > 0) out->print("%1.1x", count);
+  else                out->print(" ");
+}
+
+void CodeHeapState::print_space_single(outputStream* out, unsigned short space) {
+  size_t  space_in_bytes = ((unsigned int)space)<<log2_seg_size;
+  char    fraction       = (space == 0) ? ' ' : (space_in_bytes >= granule_size-1) ? '*' : char('0'+10*space_in_bytes/granule_size);
+  out->print("%c", fraction);
+}
+
+void CodeHeapState::print_age_single(outputStream* out, unsigned int age) {
+  unsigned int indicator = 0;
+  unsigned int age_range = 256;
+  if (age > 0) {
+    while ((age_range > 0) && (latest_compilation_id-age > latest_compilation_id/age_range)) {
+      age_range /= 2;
+      indicator += 1;
+    }
+    out->print("%c", char('0'+indicator));
+  } else {
+    out->print(" ");
+  }
+}
+
+void CodeHeapState::print_line_delim(outputStream* out, outputStream* ast, char* low_bound, unsigned int ix, unsigned int gpl) {
+  if (ix % gpl == 0) {
+    if (ix > 0) {
+      ast->print("|");
+    }
+    ast->cr();
+    assert(out == ast, "must use the same stream!");
+
+    ast->print("%p", low_bound + ix*granule_size);
+    ast->fill_to(19);
+    ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
+  }
+}
+
+void CodeHeapState::print_line_delim(outputStream* out, bufferedStream* ast, char* low_bound, unsigned int ix, unsigned int gpl) {
+  assert(out != ast, "must not use the same stream!");
+  if (ix % gpl == 0) {
+    if (ix > 0) {
+      ast->print("|");
+    }
+    ast->cr();
+
+    { // can't use STRINGSTREAM_FLUSH_LOCKED("") here.
+      ttyLocker ttyl;
+      out->print("%s", ast->as_string());
+      ast->reset();
+    }
+
+    ast->print("%p", low_bound + ix*granule_size);
+    ast->fill_to(19);
+    ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
+  }
+}
+
+CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
+  if (cb != NULL ) {
+    if (cb->is_runtime_stub())                return runtimeStub;
+    if (cb->is_deoptimization_stub())         return deoptimizationStub;
+    if (cb->is_uncommon_trap_stub())          return uncommonTrapStub;
+    if (cb->is_exception_stub())              return exceptionStub;
+    if (cb->is_safepoint_stub())              return safepointStub;
+    if (cb->is_adapter_blob())                return adapterBlob;
+    if (cb->is_method_handles_adapter_blob()) return mh_adapterBlob;
+    if (cb->is_buffer_blob())                 return bufferBlob;
+
+    if (cb->is_nmethod() ) {
+      if (((nmethod*)cb)->is_in_use())        return nMethod_inuse;
+      if (((nmethod*)cb)->is_alive() && !(((nmethod*)cb)->is_not_entrant()))   return nMethod_notused;
+      if (((nmethod*)cb)->is_alive())         return nMethod_alive;
+      if (((nmethod*)cb)->is_unloaded())      return nMethod_unloaded;
+      if (((nmethod*)cb)->is_zombie())        return nMethod_dead;
+      tty->print_cr("unhandled nmethod state");
+      return nMethod_dead;
+    }
+  }
+  return noType;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/code/codeHeapState.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CODE_CODEHEAPSTATE_HPP
+#define SHARE_CODE_CODEHEAPSTATE_HPP
+
+#include "memory/heap.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+
+class CodeHeapState : public CHeapObj<mtCode> {
+
+ public:
+  enum compType {
+    noComp = 0,     // must be 0, due to initialization by memset to zero
+    c1,
+    c2,
+    jvmci,
+    lastComp
+  };
+
+  enum blobType {
+     noType = 0,         // must be 0, due to initialization by memset to zero
+     // The nMethod_* values correspond 1:1 to the CompiledMethod enum values.
+     nMethod_inuse,       // executable. This is the "normal" state for a nmethod.
+     nMethod_notused,     // assumed inactive, marked not entrant. Could be revived if necessary.
+     nMethod_notentrant,  // no new activations allowed, marked for deoptimization. Old activations may still exist.
+                          // Will transition to "zombie" after all activations are gone.
+     nMethod_zombie,      // No more activations exist, ready for purge (remove from code cache).
+     nMethod_unloaded,    // No activations exist, should not be called. Transient state on the way to "zombie".
+     nMethod_alive = nMethod_notentrant, // Combined state: nmethod may have activations, thus can't be purged.
+     nMethod_dead  = nMethod_zombie,     // Combined state: nmethod does not have any activations.
+     runtimeStub   = nMethod_unloaded + 1,
+     ricochetStub,
+     deoptimizationStub,
+     uncommonTrapStub,
+     exceptionStub,
+     safepointStub,
+     adapterBlob,
+     mh_adapterBlob,
+     bufferBlob,
+     lastType
+  };
+
+ private:
+  static void prepare_StatArray(outputStream* out, size_t nElem, size_t granularity, const char* heapName);
+  static void prepare_FreeArray(outputStream* out, unsigned int nElem, const char* heapName);
+  static void prepare_TopSizeArray(outputStream* out, unsigned int nElem, const char* heapName);
+  static void prepare_SizeDistArray(outputStream* out, unsigned int nElem, const char* heapName);
+  static void discard_StatArray(outputStream* out);
+  static void discard_FreeArray(outputStream* out);
+  static void discard_TopSizeArray(outputStream* out);
+  static void discard_SizeDistArray(outputStream* out);
+
+  static void update_SizeDistArray(outputStream* out, unsigned int len);
+
+  static const char* get_heapName(CodeHeap* heap);
+  static unsigned int findHeapIndex(outputStream* out, const char* heapName);
+  static void get_HeapStatGlobals(outputStream* out, const char* heapName);
+  static void set_HeapStatGlobals(outputStream* out, const char* heapName);
+
+  static void printBox(outputStream* out, const char border, const char* text1, const char* text2);
+  static void print_blobType_legend(outputStream* out);
+  static void print_space_legend(outputStream* out);
+  static void print_age_legend(outputStream* out);
+  static void print_blobType_single(outputStream *ast, u2 /* blobType */ type);
+  static void print_count_single(outputStream *ast, unsigned short count);
+  static void print_space_single(outputStream *ast, unsigned short space);
+  static void print_age_single(outputStream *ast, unsigned int age);
+  static void print_line_delim(outputStream* out, bufferedStream *sst, char* low_bound, unsigned int ix, unsigned int gpl);
+  static void print_line_delim(outputStream* out, outputStream *sst, char* low_bound, unsigned int ix, unsigned int gpl);
+  static blobType get_cbType(CodeBlob* cb);
+
+ public:
+  static void discard(outputStream* out, CodeHeap* heap);
+  static void aggregate(outputStream* out, CodeHeap* heap, const char* granularity);
+  static void print_usedSpace(outputStream* out, CodeHeap* heap);
+  static void print_freeSpace(outputStream* out, CodeHeap* heap);
+  static void print_count(outputStream* out, CodeHeap* heap);
+  static void print_space(outputStream* out, CodeHeap* heap);
+  static void print_age(outputStream* out, CodeHeap* heap);
+  static void print_names(outputStream* out, CodeHeap* heap);
+};
+
+//----------------
+//  StatElement
+//----------------
+//  Each analysis granule is represented by an instance of
+//  this StatElement struct. It collects and aggregates all
+//  information describing the allocated contents of the granule.
+//  Free (unallocated) content is not considered (see FreeBlk for that).
+//  All StatElements of a heap segment are stored in the related StatArray.
+//  Current size: 40 bytes + 8 bytes class header.
+class StatElement : public CHeapObj<mtCode> {
+  public:
+    // A note on ages: the compilation_id easily overflows an unsigned short in large systems.
+    unsigned int       t1_age;      // oldest compilation_id of tier1 nMethods.
+    unsigned int       t2_age;      // oldest compilation_id of tier2 nMethods.
+    unsigned int       tx_age;      // oldest compilation_id of inactive/not entrant nMethods.
+    unsigned short     t1_space;    // in units of _segment_size to "prevent" overflow
+    unsigned short     t2_space;    // in units of _segment_size to "prevent" overflow
+    unsigned short     tx_space;    // in units of _segment_size to "prevent" overflow
+    unsigned short     dead_space;  // in units of _segment_size to "prevent" overflow
+    unsigned short     stub_space;  // in units of _segment_size to "prevent" overflow
+    unsigned short     t1_count;
+    unsigned short     t2_count;
+    unsigned short     tx_count;
+    unsigned short     dead_count;
+    unsigned short     stub_count;
+    CompLevel          level;       // optimization level (see globalDefinitions.hpp)
+    //---<  replaced the correct enum typing with u2 to save space.
+    u2                 compiler;    // compiler which generated this blob. Type is CodeHeapState::compType
+    u2                 type;        // used only if granularity == segment_size. Type is CodeHeapState::blobType
+};
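The unsigned short space fields above stay within range only because they are kept in _segment_size units rather than bytes. A sketch of the implied conversions, with hypothetical helper names that are not part of the patch:

    #include <cstddef>

    unsigned short to_seg_units(size_t bytes, int log2_seg_size) {
      return (unsigned short)(bytes >> log2_seg_size);  // bytes -> segment units
    }

    size_t to_bytes(unsigned short segs, int log2_seg_size) {
      return ((size_t)segs) << log2_seg_size;           // segment units -> bytes
    }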
+
+//-----------
+//  FreeBlk
+//-----------
+//  Each free block in the code heap is represented by an instance
+//  of this FreeBlk struct. It collects all information we need to
+//  know about each free block.
+//  All FreeBlks of a heap segment are stored in the related FreeArray.
+struct FreeBlk : public CHeapObj<mtCode> {
+  HeapBlock*     start;       // address of free block
+  unsigned int   len;          // length of free block
+
+  unsigned int   gap;          // gap to next free block
+  unsigned int   index;        // sequential number of free block
+  unsigned short n_gapBlocks;  // # used blocks in gap
+  bool           stubs_in_gap; // The occupied space between this and the next free block contains (unmovable) stubs or blobs.
+};
+
+//--------------
+//  TopSizeBlk
+//--------------
+//  The n largest blocks in the code heap are represented in an instance
+//  of this TopSizeBlk struct. It collects all information we need to
+//  know about those largest blocks.
+//  All TopSizeBlks of a heap segment are stored in the related TopSizeArray.
+struct TopSizeBlk : public CHeapObj<mtCode> {
+  HeapBlock*     start;       // address of block
+  unsigned int   len;          // length of block, in _segment_size units. Will never overflow int.
+
+  unsigned int   index;        // ordering index, 0 is largest block
+                               // contains array index of next smaller block
+                               // -1 indicates end of list
+  CompLevel      level;        // optimization level (see globalDefinitions.hpp)
+  u2             compiler;     // compiler which generated this blob
+  u2             type;         // blob type
+};
+
+//---------------------------
+//  SizeDistributionElement
+//---------------------------
+//  During CodeHeap analysis, each allocated code block is associated with a
+//  SizeDistributionElement according to its size. Later on, the array of
+//  SizeDistributionElements is used to print a size distribution bar graph.
+//  All SizeDistributionElements of a heap segment are stored in the related SizeDistributionArray.
+struct SizeDistributionElement : public CHeapObj<mtCode> {
+                               // Range is [rangeStart..rangeEnd).
+  unsigned int   rangeStart;   // start of length range, in _segment_size units.
+  unsigned int   rangeEnd;     // end   of length range, in _segment_size units.
+  unsigned int   lenSum;       // length of block, in _segment_size units. Will never overflow int.
+
+  unsigned int   count;        // number of blocks assigned to this range.
+};
+
+//----------------
+//  CodeHeapStat
+//----------------
+//  Because we have to deal with multiple CodeHeaps, we need to
+//  collect "global" information in a segment-specific way as well.
+//  That's what the CodeHeapStat and CodeHeapStatArray are used for.
+//  Before a heap segment is processed, the contents of the CodeHeapStat
+//  element is copied to the global variables (get_HeapStatGlobals).
+//  When processing is done, the possibly modified global variables are
+//  copied back (set_HeapStatGlobals) to the CodeHeapStat element.
+struct CodeHeapStat {
+    StatElement*                     StatArray;
+    struct FreeBlk*                  FreeArray;
+    struct TopSizeBlk*               TopSizeArray;
+    struct SizeDistributionElement*  SizeDistributionArray;
+    const char*                      heapName;
+    size_t                           segment_size;
+    // StatElement data
+    size_t        alloc_granules;
+    size_t        granule_size;
+    bool          segment_granules;
+    unsigned int  nBlocks_t1;
+    unsigned int  nBlocks_t2;
+    unsigned int  nBlocks_alive;
+    unsigned int  nBlocks_dead;
+    unsigned int  nBlocks_unloaded;
+    unsigned int  nBlocks_stub;
+    // FreeBlk data
+    unsigned int  alloc_freeBlocks;
+    // UsedBlk data
+    unsigned int  alloc_topSizeBlocks;
+    unsigned int  used_topSizeBlocks;
+    // method hotness data. Temperature range is [-reset_val..+reset_val]
+    int           avgTemp;
+    int           maxTemp;
+    int           minTemp;
+};
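A sketch of the save/restore pattern described above, reduced to a single field for illustration (the real get/set_HeapStatGlobals copy every member listed in CodeHeapStat):

    struct HeapStatSketch { unsigned int nBlocks_t1; };

    static HeapStatSketch heap_stats[3];  // one element per CodeHeap
    static unsigned int   g_nBlocks_t1;   // "global" working copy

    void get_globals(unsigned int ix) { g_nBlocks_t1 = heap_stats[ix].nBlocks_t1; }  // before processing
    void set_globals(unsigned int ix) { heap_stats[ix].nBlocks_t1 = g_nBlocks_t1; }  // after processing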
+
+#endif // SHARE_CODE_CODEHEAPSTATE_HPP
--- a/src/hotspot/share/code/compiledIC.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/compiledIC.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -235,7 +235,7 @@
     assert(k->verify_itable_index(itable_index), "sanity check");
 #endif //ASSERT
     CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
-                                                    call_info->resolved_klass());
+                                                    call_info->resolved_klass(), false);
     holder->claim();
     InlineCacheBuffer::create_transition_stub(this, holder, entry);
   } else {
@@ -273,7 +273,7 @@
   assert(!is_optimized(), "an optimized call cannot be megamorphic");
 
   // Cannot rely on cached_value. It is either an interface or a method.
-  return VtableStubs::is_entry_point(ic_destination());
+  return VtableStubs::entry_point(ic_destination()) != NULL;
 }
 
 bool CompiledIC::is_call_to_compiled() const {
@@ -525,9 +525,11 @@
     return true;
   }
   // itable stubs also use CompiledICHolder
-  if (VtableStubs::is_entry_point(entry) && VtableStubs::stub_containing(entry)->is_itable_stub()) {
-    return true;
+  if (cb != NULL && cb->is_vtable_blob()) {
+    VtableStub* s = VtableStubs::entry_point(entry);
+    return (s != NULL) && s->is_itable_stub();
   }
+
   return false;
 }
 
--- a/src/hotspot/share/code/dependencies.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/dependencies.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1812,18 +1812,18 @@
 }
 
 Klass* Dependencies::check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes) {
-  assert(!oopDesc::is_null(call_site), "sanity");
-  assert(!oopDesc::is_null(method_handle), "sanity");
+  assert(call_site != NULL, "sanity");
+  assert(method_handle != NULL, "sanity");
   assert(call_site->is_a(SystemDictionary::CallSite_klass()),     "sanity");
 
   if (changes == NULL) {
     // Validate all CallSites
-    if (java_lang_invoke_CallSite::target(call_site) != method_handle)
+    if (!oopDesc::equals(java_lang_invoke_CallSite::target(call_site), method_handle))
       return call_site->klass();  // assertion failed
   } else {
     // Validate the given CallSite
-    if (call_site == changes->call_site() && java_lang_invoke_CallSite::target(call_site) != changes->method_handle()) {
-      assert(method_handle != changes->method_handle(), "must be");
+    if (oopDesc::equals(call_site, changes->call_site()) && !oopDesc::equals(java_lang_invoke_CallSite::target(call_site), changes->method_handle())) {
+      assert(!oopDesc::equals(method_handle, changes->method_handle()), "must be");
       return call_site->klass();  // assertion failed
     }
   }
--- a/src/hotspot/share/code/dependencies.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/dependencies.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -32,6 +32,7 @@
 #include "code/compressedStream.hpp"
 #include "code/nmethod.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/hashtable.hpp"
 
--- a/src/hotspot/share/code/location.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/location.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,6 @@
 
 #include "asm/assembler.hpp"
 #include "code/vmreg.hpp"
-#include "memory/allocation.hpp"
 
 // A Location describes a concrete machine variable location
 // (such as integer or floating point register or a stack-held
--- a/src/hotspot/share/code/nmethod.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/nmethod.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -37,7 +37,6 @@
 #include "compiler/compilerDirectives.hpp"
 #include "compiler/directivesParser.hpp"
 #include "compiler/disassembler.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecode.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
@@ -53,6 +52,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
 #include "runtime/vmThread.hpp"
--- a/src/hotspot/share/code/oopRecorder.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/oopRecorder.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -30,6 +30,7 @@
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
+#include "utilities/copy.hpp"
 
 #ifdef ASSERT
 template <class T> int ValueRecorder<T>::_find_index_calls = 0;
@@ -201,4 +202,3 @@
   }
   return _values.at(location).index();
 }
-
--- a/src/hotspot/share/code/pcDesc.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/pcDesc.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_CODE_PCDESC_HPP
 #define SHARE_VM_CODE_PCDESC_HPP
 
-#include "memory/allocation.hpp"
 
 // PcDescs map a physical PC (given as offset from start of nmethod) to
 // the corresponding source scope and byte code index.
--- a/src/hotspot/share/code/relocInfo.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/relocInfo.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,6 +28,7 @@
 #include "code/nmethod.hpp"
 #include "code/relocInfo.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/copy.hpp"
 #include "oops/oop.inline.hpp"
@@ -307,7 +308,7 @@
 void Relocation::const_set_data_value(address x) {
 #ifdef _LP64
   if (format() == relocInfo::narrow_oop_in_const) {
-    *(narrowOop*)addr() = oopDesc::encode_heap_oop((oop) x);
+    *(narrowOop*)addr() = CompressedOops::encode((oop) x);
   } else {
 #endif
     *(address*)addr() = x;
@@ -319,7 +320,7 @@
 void Relocation::const_verify_data_value(address x) {
 #ifdef _LP64
   if (format() == relocInfo::narrow_oop_in_const) {
-    guarantee(*(narrowOop*)addr() == oopDesc::encode_heap_oop((oop) x), "must agree");
+    guarantee(*(narrowOop*)addr() == CompressedOops::encode((oop) x), "must agree");
   } else {
 #endif
     guarantee(*(address*)addr() == x, "must agree");
--- a/src/hotspot/share/code/vmreg.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/vmreg.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #define SHARE_VM_CODE_VMREG_HPP
 
 #include "asm/register.hpp"
-#include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
--- a/src/hotspot/share/code/vtableStubs.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/vtableStubs.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
 
    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
    // If changing the name, update the other file accordingly.
-    BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
+    VtableBlob* blob = VtableBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
       return NULL;
     }
@@ -167,17 +167,18 @@
   _number_of_vtable_stubs++;
 }
 
-
-bool VtableStubs::is_entry_point(address pc) {
+VtableStub* VtableStubs::entry_point(address pc) {
   MutexLocker ml(VtableStubs_lock);
   VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
   uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
   VtableStub* s;
   for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
-  return s == stub;
+  if (s == stub) {
+    return s;
+  }
+  return NULL;
 }
 
-
 bool VtableStubs::contains(address pc) {
   // simple solution for now - we may want to use
   // a faster way if this function is called often
--- a/src/hotspot/share/code/vtableStubs.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/code/vtableStubs.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -126,7 +126,7 @@
  public:
   static address     find_vtable_stub(int vtable_index) { return find_stub(true,  vtable_index); }
   static address     find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
-  static bool        is_entry_point(address pc);                     // is pc a vtable stub entry point?
+  static VtableStub* entry_point(address pc);                        // vtable stub entry point for a pc
   static bool        contains(address pc);                           // is pc within any stub?
   static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
   static int         number_of_vtable_stubs() { return _number_of_vtable_stubs; }
--- a/src/hotspot/share/compiler/compileBroker.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/compiler/compileBroker.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,6 +28,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
+#include "code/codeHeapState.hpp"
 #include "code/dependencyContext.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
@@ -50,6 +51,7 @@
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
 #include "runtime/timerTrace.hpp"
@@ -522,7 +524,7 @@
 // CompileBroker::compilation_init
 //
 // Initialize the Compilation object
-void CompileBroker::compilation_init(TRAPS) {
+void CompileBroker::compilation_init_phase1(TRAPS) {
   _last_method_compiled[0] = '\0';
 
   // No need to initialize compilation system if we do not use it.
@@ -669,11 +671,14 @@
                                           (jlong)CompileBroker::no_compile,
                                           CHECK);
   }
+}
 
+// Completes compiler initialization. Compilation requests submitted
+// prior to this will be silently ignored.
+void CompileBroker::compilation_init_phase2() {
   _initialized = true;
 }
 
-
 JavaThread* CompileBroker::make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
                                        AbstractCompiler* comp, bool compiler_thread, TRAPS) {
   JavaThread* thread = NULL;
@@ -2423,3 +2428,111 @@
     }
   }
 }
+
+// Print general/accumulated JIT information.
+void CompileBroker::print_info(outputStream *out) {
+  if (out == NULL) out = tty;
+  out->cr();
+  out->print_cr("======================");
+  out->print_cr("   General JIT info   ");
+  out->print_cr("======================");
+  out->cr();
+  out->print_cr("            JIT is : %7s",     should_compile_new_jobs() ? "on" : "off");
+  out->print_cr("  Compiler threads : %7d",     (int)CICompilerCount);
+  out->cr();
+  out->print_cr("CodeCache overview");
+  out->print_cr("--------------------------------------------------------");
+  out->cr();
+  out->print_cr("         Reserved size : " SIZE_FORMAT_W(7) " KB", CodeCache::max_capacity() / K);
+  out->print_cr("        Committed size : " SIZE_FORMAT_W(7) " KB", CodeCache::capacity() / K);
+  out->print_cr("  Unallocated capacity : " SIZE_FORMAT_W(7) " KB", CodeCache::unallocated_capacity() / K);
+  out->cr();
+
+  out->cr();
+  out->print_cr("CodeCache cleaning overview");
+  out->print_cr("--------------------------------------------------------");
+  out->cr();
+  NMethodSweeper::print(out);
+  out->print_cr("--------------------------------------------------------");
+  out->cr();
+}
+
+// Note: tty_lock must not be held upon entry to this function.
+//       Print functions called from herein do "micro-locking" on tty_lock.
+//       That's a tradeoff which keeps important blocks of output together.
+//       At the same time, the continuous tty_lock hold time is kept in check,
+//       preventing concurrently printing threads from stalling for a long time.
+void CompileBroker::print_heapinfo(outputStream* out, const char* function, const char* granularity) {
+  TimeStamp ts_total;
+  TimeStamp ts;
+
+  bool allFun = !strcmp(function, "all");
+  bool aggregate = !strcmp(function, "aggregate") || !strcmp(function, "analyze") || allFun;
+  bool usedSpace = !strcmp(function, "UsedSpace") || allFun;
+  bool freeSpace = !strcmp(function, "FreeSpace") || allFun;
+  bool methodCount = !strcmp(function, "MethodCount") || allFun;
+  bool methodSpace = !strcmp(function, "MethodSpace") || allFun;
+  bool methodAge = !strcmp(function, "MethodAge") || allFun;
+  bool methodNames = !strcmp(function, "MethodNames") || allFun;
+  bool discard = !strcmp(function, "discard") || allFun;
+
+  if (out == NULL) {
+    out = tty;
+  }
+
+  if (!(aggregate || usedSpace || freeSpace || methodCount || methodSpace || methodAge || methodNames || discard)) {
+    out->print_cr("\n__ CodeHeapStateAnalytics: Function %s is not supported", function);
+    out->cr();
+    return;
+  }
+
+  ts_total.update(); // record starting point
+
+  if (aggregate) {
+    print_info(out);
+  }
+
+  // We hold the CodeHeapStateAnalytics_lock all the time, from here until we leave this function.
+  // That helps us get a consistent view of the CodeHeap, at least for the "all" function.
+  // When individual parts of the analysis are requested via the jcmd interface, it is
+  // possible that another thread (another jcmd user, or the VM running into a CodeCache
+  // OOM) updates the aggregated data in between requests. That is a tolerable tradeoff
+  // because we cannot hold a lock across user interactions.
+  ts.update(); // record starting point
+  MutexLockerEx mu1(CodeHeapStateAnalytics_lock, Mutex::_no_safepoint_check_flag);
+  out->cr();
+  out->print_cr("__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________", ts.seconds());
+  out->cr();
+
+  if (aggregate) {
+    // It is sufficient to hold the CodeCache_lock only for the aggregate step.
+    // All other functions operate on aggregated data - except MethodNames, but that should be safe.
+    // The separate CodeHeapStateAnalytics_lock protects the printing functions against
+    // concurrent aggregate steps. It must be acquired before the CodeCache_lock:
+    // CodeHeapStateAnalytics_lock may be held by a concurrent thread for a long time,
+    // and waiting for it while holding the CodeCache_lock would unnecessarily
+    // stretch the CodeCache_lock hold time.
+    ts.update(); // record starting point
+    MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    out->cr();
+    out->print_cr("__ CodeCache lock wait took %10.3f seconds _________", ts.seconds());
+    out->cr();
+
+    ts.update(); // record starting point
+    CodeCache::aggregate(out, granularity);
+    out->cr();
+    out->print_cr("__ CodeCache lock hold took %10.3f seconds _________", ts.seconds());
+    out->cr();
+  }
+
+  if (usedSpace) CodeCache::print_usedSpace(out);
+  if (freeSpace) CodeCache::print_freeSpace(out);
+  if (methodCount) CodeCache::print_count(out);
+  if (methodSpace) CodeCache::print_space(out);
+  if (methodAge) CodeCache::print_age(out);
+  if (methodNames) CodeCache::print_names(out);
+  if (discard) CodeCache::discard(out);
+
+  out->cr();
+  out->print_cr("__ CodeHeapStateAnalytics total duration %10.3f seconds _________", ts_total.seconds());
+  out->cr();
+}
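
A minimal usage sketch for the new entry point above; the "all" and "4096" arguments
are illustrative assumptions (the actual wiring goes through a diagnostic command
that is not part of this excerpt):

    // Run the complete analysis: "all" selects aggregate, every print
    // function, and discard, per the dispatch in print_heapinfo().
    // The granularity string is forwarded verbatim to CodeCache::aggregate().
    CompileBroker::print_heapinfo(NULL /* NULL means print to tty */, "all", "4096");
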
--- a/src/hotspot/share/compiler/compileBroker.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/compiler/compileBroker.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -282,7 +282,8 @@
     CompileQueue *q = compile_queue(comp_level);
     return q != NULL ? q->size() : 0;
   }
-  static void compilation_init(TRAPS);
+  static void compilation_init_phase1(TRAPS);
+  static void compilation_init_phase2();
   static void init_compiler_thread_log();
   static nmethod* compile_method(const methodHandle& method,
                                  int osr_bci,
@@ -381,6 +382,10 @@
 
   // Log that compilation profiling is skipped because metaspace is full.
   static void log_metaspace_failure();
+
+  // CodeHeap State Analytics.
+  static void print_info(outputStream *out);
+  static void print_heapinfo(outputStream *out, const char* function, const char* granularity);
 };
 
 #endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP
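
The two-phase split implies the caller sequence sketched below (hedged; the actual
call site lives in VM startup code outside this diff):

    // Phase 1 runs under TRAPS and may throw: queues, counters and
    // compiler objects are created here.
    CompileBroker::compilation_init_phase1(CHECK);
    // ... startup steps that must finish before compiles are accepted ...
    // Phase 2 only sets _initialized; requests submitted before this
    // point are silently dropped, as noted in the .cpp change above.
    CompileBroker::compilation_init_phase2();
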
--- a/src/hotspot/share/gc/cms/cmsArguments.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/cmsArguments.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -80,8 +80,8 @@
 // sparc/solaris for certain applications, but would gain from
 // further optimization and tuning efforts, and would almost
 // certainly gain from analysis of platform and environment.
-void CMSArguments::initialize_flags() {
-  GCArguments::initialize_flags();
+void CMSArguments::initialize() {
+  GCArguments::initialize();
   assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
   assert(UseConcMarkSweepGC, "CMS is expected to be on here");
 
--- a/src/hotspot/share/gc/cms/cmsArguments.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/cmsArguments.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -34,7 +34,7 @@
   void disable_adaptive_size_policy(const char* collector_name);
   void set_parnew_gc_flags();
 public:
-  virtual void initialize_flags();
+  virtual void initialize();
   virtual size_t conservative_max_heap_alignment();
   virtual CollectedHeap* create_heap();
 };
--- a/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,7 +28,7 @@
 #include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/generationSpec.hpp"
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -132,7 +132,7 @@
 CMSHeap* CMSHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
-  assert(heap->kind() == CollectedHeap::CMSHeap, "Not a CMSHeap");
+  assert(heap->kind() == CollectedHeap::CMS, "Invalid name");
   return (CMSHeap*) heap;
 }
 
--- a/src/hotspot/share/gc/cms/cmsHeap.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/cmsHeap.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -57,7 +57,7 @@
   static CMSHeap* heap();
 
   virtual Name kind() const {
-    return CollectedHeap::CMSHeap;
+    return CollectedHeap::CMS;
   }
 
   virtual const char* name() const {
--- a/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,6 +28,8 @@
 #include "gc/cms/cmsOopClosures.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 // MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated,
@@ -45,13 +47,13 @@
 }
 
 // Decode the oop and call do_oop on it.
-#define DO_OOP_WORK_IMPL(cls)                                 \
-  template <class T> void cls::do_oop_work(T* p) {            \
-    T heap_oop = oopDesc::load_heap_oop(p);                   \
-    if (!oopDesc::is_null(heap_oop)) {                        \
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);  \
-      do_oop(obj);                                            \
-    }                                                         \
+#define DO_OOP_WORK_IMPL(cls)                               \
+  template <class T> void cls::do_oop_work(T* p) {          \
+    T heap_oop = RawAccess<>::oop_load(p);                  \
+    if (!CompressedOops::is_null(heap_oop)) {               \
+      oop obj = CompressedOops::decode_not_null(heap_oop);  \
+      do_oop(obj);                                          \
+    }                                                       \
   }
 
 #define DO_OOP_WORK_NV_IMPL(cls)                              \
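
For a narrowOop field, the rewritten macro body behaves roughly like the hedged
expansion below (assuming UseCompressedOops, and that RawAccess<> with no
decorators is a plain, unbarriered load):

    // Sketch of the load/decode idiom DO_OOP_WORK_IMPL now generates.
    narrowOop heap_oop = RawAccess<>::oop_load(p);          // raw 32-bit load
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);  // base + (n << shift)
      do_oop(obj);
    }
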
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -37,6 +37,8 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/handles.inline.hpp"
@@ -2250,9 +2252,9 @@
   }
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       do_oop(p, obj);
     }
   }
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -44,7 +44,7 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -62,6 +62,7 @@
 #include "memory/iterator.inline.hpp"
 #include "memory/padded.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/atomic.hpp"
@@ -6638,6 +6639,11 @@
   _mark_stack(mark_stack)
 { }
 
+template <class T> void PushAndMarkVerifyClosure::do_oop_work(T *p) {
+  oop obj = RawAccess<>::oop_load(p);
+  do_oop(obj);
+}
+
 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
 
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1319,10 +1319,8 @@
   CMSMarkStack*    _mark_stack;
  protected:
   void do_oop(oop p);
-  template <class T> inline void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    do_oop(obj);
-  }
+  template <class T> void do_oop_work(T *p);
+
  public:
   PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                            MemRegion span,
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -51,6 +51,8 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
@@ -679,8 +681,7 @@
 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
 #ifdef ASSERT
   {
-    assert(!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     // We never expect to see a null reference being processed
     // as a weak reference.
     assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
@@ -690,7 +691,7 @@
   _par_cl->do_oop_nv(p);
 
   if (CMSHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
 }
@@ -706,8 +707,7 @@
 void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
 #ifdef ASSERT
   {
-    assert(!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     // We never expect to see a null reference being processed
     // as a weak reference.
     assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
@@ -717,7 +717,7 @@
   _cl->do_oop_nv(p);
 
   if (CMSHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
 }
@@ -726,15 +726,15 @@
 void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }
 
 template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded()
                       ? obj->forwardee()
                       : _g->DefNewGeneration::copy_to_survivor_space(obj);
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
     if (_gc_barrier) {
       // If p points to a younger generation, mark the card.
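
The OOP_NOT_NULL decorator folds the old assert-plus-helper pair into the access
API; a hedged before/after comparison of the pattern used throughout this file:

    // Before: caller asserts non-null, then calls the *_not_null helper.
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);

    // After (replaces both lines above): the decorator carries the non-null
    // invariant, which debug builds can verify inside the access layer itself.
    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
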
--- a/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -32,10 +32,11 @@
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
-  assert (!oopDesc::is_null(*p), "null weak reference?");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // weak references are sometimes scanned twice; must check
   // that to-space doesn't already contain this object
   if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
@@ -51,7 +52,7 @@
       new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
                                                                 obj, obj_sz, m);
     }
-    oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+    RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
   }
 }
 
@@ -60,8 +61,7 @@
 
 template <class T> inline void ParScanClosure::par_do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  assert(!oopDesc::is_null(*p), "expected non-null object");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < gen_boundary()) {
     rs()->write_ref_field_gc_par(p, obj);
@@ -77,9 +77,9 @@
          && (CMSHeap::heap()->is_young_gen(generation()) || gc_barrier),
          "The gen must be right, and we must be doing the barrier "
          "in older generations.");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
 #ifndef PRODUCT
       if (_g->to()->is_in_reserved(obj)) {
@@ -111,14 +111,14 @@
       oop new_obj;
       if (m->is_marked()) { // Contains forwarding pointer.
         new_obj = ParNewGeneration::real_forwardee(obj);
-        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+        RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
         log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                         "forwarded ",
                                         new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
       } else {
         size_t obj_sz = obj->size_given_klass(objK);
         new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
-        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+        RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
         if (root_scan) {
           // This may have pushed an object.  If we have a root
           // category with a lot of roots, can't let the queue get too
--- a/src/hotspot/share/gc/cms/promotionInfo.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/promotionInfo.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,8 +26,9 @@
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/promotionInfo.hpp"
 #include "gc/shared/genOopClosures.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/markOop.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 
 /////////////////////////////////////////////////////////////////////////
 //// PromotionInfo
@@ -39,7 +40,7 @@
   PromotedObject* res;
   if (UseCompressedOops) {
     // The next pointer is a compressed oop stored in the top 32 bits
-    res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
+    res = (PromotedObject*)CompressedOops::decode(_data._narrow_next);
   } else {
     res = (PromotedObject*)(_next & next_mask);
   }
@@ -52,7 +53,7 @@
          "or insufficient alignment of objects");
   if (UseCompressedOops) {
     assert(_data._narrow_next == 0, "Overwrite?");
-    _data._narrow_next = oopDesc::encode_heap_oop(oop(x));
+    _data._narrow_next = CompressedOops::encode(oop(x));
   } else {
     _next |= (intptr_t)x;
   }
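
The next-pointer trick stores a 64-bit address in 32 bits with the same base/shift
arithmetic as compressed object fields; a hedged numeric example, assuming a zero
heap base and a 3-bit shift (8-byte alignment):

    // encode: narrow = (addr - base) >> shift
    // 0x0000000700000040 >> 3 == 0xE0000008, which fits in 32 bits.
    narrowOop n = CompressedOops::encode(oop(x));
    // decode: addr = base + ((uintptr_t)n << shift), recovering the pointer.
    PromotedObject* next = (PromotedObject*)CompressedOops::decode(n);
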
--- a/src/hotspot/share/gc/cms/vmCMSOperations.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/cms/vmCMSOperations.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,7 @@
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/cms/vmCMSOperations.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
--- a/src/hotspot/share/gc/g1/collectionSetChooser.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/collectionSetChooser.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/collectionSetChooser.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "runtime/atomic.hpp"
 
@@ -83,8 +84,7 @@
                   100), true /* C_Heap */),
     _front(0), _end(0), _first_par_unreserved_idx(0),
     _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
-  _region_live_threshold_bytes =
-    HeapRegion::GrainBytes * (size_t) G1MixedGCLiveThresholdPercent / 100;
+  _region_live_threshold_bytes = mixed_gc_live_threshold_bytes();
 }
 
 #ifndef PRODUCT
@@ -148,6 +148,8 @@
   assert(!hr->is_pinned(),
          "Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index());
   assert(!hr->is_young(), "should not be young!");
+  assert(hr->rem_set()->is_complete(),
+         "Trying to add region %u to the collection set with incomplete remembered set", hr->hrm_index());
   _regions.append(hr);
   _end++;
   _remaining_reclaimable_bytes += hr->reclaimable_bytes();
@@ -203,6 +205,16 @@
   }
 }
 
+void CollectionSetChooser::iterate(HeapRegionClosure* cl) {
+  for (uint i = _front; i < _end; i++) {
+    HeapRegion* r = regions_at(i);
+    if (cl->do_heap_region(r)) {
+      cl->set_incomplete();
+      break;
+    }
+  }
+}
+
 void CollectionSetChooser::clear() {
   _regions.clear();
   _front = 0;
@@ -228,6 +240,10 @@
       // before we fill them up).
       if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
         _cset_updater.add_region(r);
+      } else if (r->is_old()) {
+        // Can clean out the remembered sets of all regions that we did not choose but
+        // we created the remembered set for.
+        r->rem_set()->clear(true);
       }
     }
     return false;
@@ -259,6 +275,18 @@
   return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
 }
 
+bool CollectionSetChooser::region_occupancy_low_enough_for_evac(size_t live_bytes) {
+  return live_bytes < mixed_gc_live_threshold_bytes();
+}
+
+bool CollectionSetChooser::should_add(HeapRegion* hr) const {
+  assert(hr->is_marked(), "pre-condition");
+  assert(!hr->is_young(), "should never consider young regions");
+  return !hr->is_pinned() &&
+          region_occupancy_low_enough_for_evac(hr->live_bytes()) &&
+          hr->rem_set()->is_complete();
+}
+
 void CollectionSetChooser::rebuild(WorkGang* workers, uint n_regions) {
   clear();
 
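
The new iterate() follows the usual HeapRegionClosure protocol: a true return from
do_heap_region() aborts the walk, and iterate() then marks the closure incomplete.
A sketch of a hypothetical consumer:

    // Hypothetical closure: visit candidates until a reclaimable-bytes
    // budget is exhausted; returning true stops iterate() early.
    class BudgetedCandidateClosure : public HeapRegionClosure {
      size_t _budget;
     public:
      BudgetedCandidateClosure(size_t budget) : _budget(budget) { }
      bool do_heap_region(HeapRegion* r) {
        size_t rec = r->reclaimable_bytes();
        if (rec > _budget) {
          return true;   // abort; iterate() calls set_incomplete() for us
        }
        _budget -= rec;
        return false;    // continue with the next candidate
      }
    };
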
--- a/src/hotspot/share/gc/g1/collectionSetChooser.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/collectionSetChooser.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,17 +101,19 @@
 
   CollectionSetChooser();
 
+  static size_t mixed_gc_live_threshold_bytes() {
+    return HeapRegion::GrainBytes * (size_t) G1MixedGCLiveThresholdPercent / 100;
+  }
+
+  static bool region_occupancy_low_enough_for_evac(size_t live_bytes);
+
   void sort_regions();
 
   // Determine whether to add the given region to the CSet chooser or
   // not. Currently, we skip pinned regions and regions whose live
   // bytes are over the threshold. Humongous regions may be reclaimed during cleanup.
-  bool should_add(HeapRegion* hr) {
-    assert(hr->is_marked(), "pre-condition");
-    assert(!hr->is_young(), "should never consider young regions");
-    return !hr->is_pinned() &&
-            hr->live_bytes() < _region_live_threshold_bytes;
-  }
+  // Regions also need a complete remembered set to be a candidate.
+  bool should_add(HeapRegion* hr) const;
 
   // Returns the number of candidate old regions added
   uint length() { return _end; }
@@ -133,6 +135,9 @@
   // and the amount of reclaimable bytes by reclaimable_bytes.
   void update_totals(uint region_num, size_t reclaimable_bytes);
 
+  // Iterate over all collection set candidate regions.
+  void iterate(HeapRegionClosure* cl);
+
   void clear();
 
   void rebuild(WorkGang* workers, uint n_regions);
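
A worked instance of mixed_gc_live_threshold_bytes(), assuming a 32 MB region size
and the default G1MixedGCLiveThresholdPercent of 85 (both values are illustrative
assumptions):

    // 33554432 * 85 / 100 = 28521267 bytes (~27.2 MB): an old region with
    // more live data than this is too expensive to evacuate in a mixed GC.
    size_t grain     = 32 * M;            // stand-in for HeapRegion::GrainBytes
    size_t threshold = grain * 85 / 100;  // G1MixedGCLiveThresholdPercent
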
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,486 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoaderData.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
-#include "gc/g1/g1Analytics.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ConcurrentMark.inline.hpp"
-#include "gc/g1/g1MMUTracker.hpp"
-#include "gc/g1/g1Policy.hpp"
-#include "gc/g1/vm_operations_g1.hpp"
-#include "gc/shared/concurrentGCPhaseManager.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/suspendibleThreadSet.hpp"
-#include "logging/log.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/debug.hpp"
-
-// ======= Concurrent Mark Thread ========
-
-// Check order in EXPAND_CONCURRENT_PHASES
-STATIC_ASSERT(ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE <
-              ConcurrentGCPhaseManager::IDLE_PHASE);
-
-#define EXPAND_CONCURRENT_PHASES(expander)                              \
-  expander(ANY, = ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE, NULL)  \
-  expander(IDLE, = ConcurrentGCPhaseManager::IDLE_PHASE, NULL)          \
-  expander(CONCURRENT_CYCLE,, "Concurrent Cycle")                       \
-  expander(CLEAR_CLAIMED_MARKS,, "Concurrent Clear Claimed Marks")      \
-  expander(SCAN_ROOT_REGIONS,, "Concurrent Scan Root Regions")          \
-  expander(CONCURRENT_MARK,, "Concurrent Mark")                         \
-  expander(MARK_FROM_ROOTS,, "Concurrent Mark From Roots")              \
-  expander(BEFORE_REMARK,, NULL)                                        \
-  expander(REMARK,, NULL)                                               \
-  expander(CREATE_LIVE_DATA,, "Concurrent Create Live Data")            \
-  expander(COMPLETE_CLEANUP,, "Concurrent Complete Cleanup")            \
-  expander(CLEANUP_FOR_NEXT_MARK,, "Concurrent Cleanup for Next Mark")  \
-  /* */
-
-class G1ConcurrentPhase : public AllStatic {
-public:
-  enum {
-#define CONCURRENT_PHASE_ENUM(tag, value, ignore_title) tag value,
-    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_ENUM)
-#undef CONCURRENT_PHASE_ENUM
-    PHASE_ID_LIMIT
-  };
-};
-
-// The CM thread is created when the G1 garbage collector is used
-
-ConcurrentMarkThread::ConcurrentMarkThread(G1ConcurrentMark* cm) :
-  ConcurrentGCThread(),
-  _cm(cm),
-  _state(Idle),
-  _phase_manager_stack(),
-  _vtime_accum(0.0),
-  _vtime_mark_accum(0.0) {
-
-  set_name("G1 Main Marker");
-  create_and_start();
-}
-
-class CMCheckpointRootsFinalClosure: public VoidClosure {
-
-  G1ConcurrentMark* _cm;
-public:
-
-  CMCheckpointRootsFinalClosure(G1ConcurrentMark* cm) :
-    _cm(cm) {}
-
-  void do_void(){
-    _cm->checkpoint_roots_final(false); // !clear_all_soft_refs
-  }
-};
-
-class CMCleanUp: public VoidClosure {
-  G1ConcurrentMark* _cm;
-public:
-
-  CMCleanUp(G1ConcurrentMark* cm) :
-    _cm(cm) {}
-
-  void do_void(){
-    _cm->cleanup();
-  }
-};
-
-double ConcurrentMarkThread::mmu_sleep_time(G1Policy* g1_policy, bool remark) {
-  // There are three reasons to use a SuspendibleThreadSetJoiner here.
-  // 1. To avoid concurrency problems:
-  //    - G1MMUTracker::add_pause(), when_sec() and their variants (when_ms() etc.) can be called
-  //      concurrently from ConcurrentMarkThread and VMThread.
-  // 2. If a GC is currently running but has not yet updated the MMU,
-  //    that pause is still taken into account in the MMU calculation.
-  // 3. If a GC is currently running, ConcurrentMarkThread waits for it to finish
-  //    and then sleeps for the amount of time predicted by delay_to_keep_mmu().
-  SuspendibleThreadSetJoiner sts_join;
-
-  const G1Analytics* analytics = g1_policy->analytics();
-  double now = os::elapsedTime();
-  double prediction_ms = remark ? analytics->predict_remark_time_ms()
-                                : analytics->predict_cleanup_time_ms();
-  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
-  return mmu_tracker->when_ms(now, prediction_ms);
-}
-
-void ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
-  if (g1_policy->adaptive_young_list_length()) {
-    jlong sleep_time_ms = mmu_sleep_time(g1_policy, remark);
-    if (!cm()->has_aborted() && sleep_time_ms > 0) {
-      os::sleep(this, sleep_time_ms, false);
-    }
-  }
-}
-
-class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
-  G1ConcurrentMark* _cm;
-
- public:
-  G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
-    GCTraceConcTimeImpl<LogLevel::Info,  LogTag::_gc, LogTag::_marking>(title),
-    _cm(cm)
-  {
-    _cm->gc_timer_cm()->register_gc_concurrent_start(title);
-  }
-
-  ~G1ConcPhaseTimer() {
-    _cm->gc_timer_cm()->register_gc_concurrent_end();
-  }
-};
-
-static const char* const concurrent_phase_names[] = {
-#define CONCURRENT_PHASE_NAME(tag, ignore_value, ignore_title) XSTR(tag),
-  EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_NAME)
-#undef CONCURRENT_PHASE_NAME
-  NULL                          // terminator
-};
-// Verify dense enum assumption.  +1 for terminator.
-STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT + 1 ==
-              ARRAY_SIZE(concurrent_phase_names));
-
-// Returns the phase number for name, or a negative value if unknown.
-static int lookup_concurrent_phase(const char* name) {
-  const char* const* names = concurrent_phase_names;
-  for (uint i = 0; names[i] != NULL; ++i) {
-    if (strcmp(name, names[i]) == 0) {
-      return static_cast<int>(i);
-    }
-  }
-  return -1;
-}
-
-// The phase must be valid and must have a title.
-static const char* lookup_concurrent_phase_title(int phase) {
-  static const char* const titles[] = {
-#define CONCURRENT_PHASE_TITLE(ignore_tag, ignore_value, title) title,
-    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_TITLE)
-#undef CONCURRENT_PHASE_TITLE
-  };
-  // Verify dense enum assumption.
-  STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT == ARRAY_SIZE(titles));
-
-  assert(0 <= phase, "precondition");
-  assert((uint)phase < ARRAY_SIZE(titles), "precondition");
-  const char* title = titles[phase];
-  assert(title != NULL, "precondition");
-  return title;
-}
-
-class G1ConcPhaseManager : public StackObj {
-  G1ConcurrentMark* _cm;
-  ConcurrentGCPhaseManager _manager;
-
-public:
-  G1ConcPhaseManager(int phase, ConcurrentMarkThread* thread) :
-    _cm(thread->cm()),
-    _manager(phase, thread->phase_manager_stack())
-  { }
-
-  ~G1ConcPhaseManager() {
-    // Deactivate the manager if marking aborted, to avoid blocking on
-    // phase exit when the phase has been requested.
-    if (_cm->has_aborted()) {
-      _manager.deactivate();
-    }
-  }
-
-  void set_phase(int phase, bool force) {
-    _manager.set_phase(phase, force);
-  }
-};
-
-// Combine phase management and timing into one convenient utility.
-class G1ConcPhase : public StackObj {
-  G1ConcPhaseTimer _timer;
-  G1ConcPhaseManager _manager;
-
-public:
-  G1ConcPhase(int phase, ConcurrentMarkThread* thread) :
-    _timer(thread->cm(), lookup_concurrent_phase_title(phase)),
-    _manager(phase, thread)
-  { }
-};
-
-const char* const* ConcurrentMarkThread::concurrent_phases() const {
-  return concurrent_phase_names;
-}
-
-bool ConcurrentMarkThread::request_concurrent_phase(const char* phase_name) {
-  int phase = lookup_concurrent_phase(phase_name);
-  if (phase < 0) return false;
-
-  while (!ConcurrentGCPhaseManager::wait_for_phase(phase,
-                                                   phase_manager_stack())) {
-    assert(phase != G1ConcurrentPhase::ANY, "Wait for ANY phase must succeed");
-    if ((phase != G1ConcurrentPhase::IDLE) && !during_cycle()) {
-      // If idle and the goal is !idle, start a collection.
-      G1CollectedHeap::heap()->collect(GCCause::_wb_conc_mark);
-    }
-  }
-  return true;
-}
-
-void ConcurrentMarkThread::run_service() {
-  _vtime_start = os::elapsedVTime();
-
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1Policy* g1_policy = g1h->g1_policy();
-
-  G1ConcPhaseManager cpmanager(G1ConcurrentPhase::IDLE, this);
-
-  while (!should_terminate()) {
-    // wait until started is set.
-    sleepBeforeNextCycle();
-    if (should_terminate()) {
-      break;
-    }
-
-    cpmanager.set_phase(G1ConcurrentPhase::CONCURRENT_CYCLE, false /* force */);
-
-    GCIdMark gc_id_mark;
-
-    cm()->concurrent_cycle_start();
-
-    GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
-    {
-      ResourceMark rm;
-      HandleMark   hm;
-      double cycle_start = os::elapsedVTime();
-
-      {
-        G1ConcPhase p(G1ConcurrentPhase::CLEAR_CLAIMED_MARKS, this);
-        ClassLoaderDataGraph::clear_claimed_marks();
-      }
-
-      // We have to ensure that we finish scanning the root regions
-      // before the next GC takes place. To ensure this we have to
-      // make sure that we do not join the STS until the root regions
-      // have been scanned. If we did, a subsequent GC could block us
-      // from joining the STS and proceed without the root regions
-      // having been scanned, which would be a correctness issue.
-
-      {
-        G1ConcPhase p(G1ConcurrentPhase::SCAN_ROOT_REGIONS, this);
-        _cm->scan_root_regions();
-      }
-
-      // It would be nice to use the G1ConcPhase class here but
-      // the "end" logging is inside the loop and not at the end of
-      // a scope. Also, the timer doesn't support nesting.
-      // Mimicking the same log output instead.
-      {
-        G1ConcPhaseManager mark_manager(G1ConcurrentPhase::CONCURRENT_MARK, this);
-        jlong mark_start = os::elapsed_counter();
-        const char* cm_title =
-          lookup_concurrent_phase_title(G1ConcurrentPhase::CONCURRENT_MARK);
-        log_info(gc, marking)("%s (%.3fs)",
-                              cm_title,
-                              TimeHelper::counter_to_seconds(mark_start));
-        for (uint iter = 1; !cm()->has_aborted(); ++iter) {
-          // Concurrent marking.
-          {
-            G1ConcPhase p(G1ConcurrentPhase::MARK_FROM_ROOTS, this);
-            _cm->mark_from_roots();
-          }
-          if (cm()->has_aborted()) break;
-
-          // Provide a control point after mark_from_roots.
-          {
-            G1ConcPhaseManager p(G1ConcurrentPhase::BEFORE_REMARK, this);
-          }
-          if (cm()->has_aborted()) break;
-
-          // Delay remark pause for MMU.
-          double mark_end_time = os::elapsedVTime();
-          jlong mark_end = os::elapsed_counter();
-          _vtime_mark_accum += (mark_end_time - cycle_start);
-          delay_to_keep_mmu(g1_policy, true /* remark */);
-          if (cm()->has_aborted()) break;
-
-          // Pause Remark.
-          log_info(gc, marking)("%s (%.3fs, %.3fs) %.3fms",
-                                cm_title,
-                                TimeHelper::counter_to_seconds(mark_start),
-                                TimeHelper::counter_to_seconds(mark_end),
-                                TimeHelper::counter_to_millis(mark_end - mark_start));
-          mark_manager.set_phase(G1ConcurrentPhase::REMARK, false);
-          CMCheckpointRootsFinalClosure final_cl(_cm);
-          VM_CGC_Operation op(&final_cl, "Pause Remark");
-          VMThread::execute(&op);
-          if (cm()->has_aborted()) {
-            break;
-          } else if (!cm()->restart_for_overflow()) {
-            break;              // Exit loop if no restart requested.
-          } else {
-            // Loop to restart for overflow.
-            mark_manager.set_phase(G1ConcurrentPhase::CONCURRENT_MARK, false);
-            log_info(gc, marking)("%s Restart for Mark Stack Overflow (iteration #%u)",
-                                  cm_title, iter);
-          }
-        }
-      }
-
-      if (!cm()->has_aborted()) {
-        G1ConcPhase p(G1ConcurrentPhase::CREATE_LIVE_DATA, this);
-        cm()->create_live_data();
-      }
-
-      double end_time = os::elapsedVTime();
-      // Update the total virtual time before doing this, since it will try
-      // to measure it to get the vtime for this marking.  We purposely
-      // neglect the presumably-short "completeCleanup" phase here.
-      _vtime_accum = (end_time - _vtime_start);
-
-      if (!cm()->has_aborted()) {
-        delay_to_keep_mmu(g1_policy, false /* cleanup */);
-
-        if (!cm()->has_aborted()) {
-          CMCleanUp cl_cl(_cm);
-          VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
-          VMThread::execute(&op);
-        }
-      } else {
-        // We don't want to update the marking status if a GC pause
-        // is already underway.
-        SuspendibleThreadSetJoiner sts_join;
-        g1h->collector_state()->set_mark_in_progress(false);
-      }
-
-      // Check if cleanup set the free_regions_coming flag. If it
-      // hasn't, we can just skip the next step.
-      if (g1h->free_regions_coming()) {
-        // The following will finish freeing up any regions that we
-        // found to be empty during cleanup. We'll do this part
-        // without joining the suspendible set. If an evacuation pause
-        // takes place, then we would carry on freeing regions in
-        // case they are needed by the pause. If a Full GC takes
-        // place, it would wait for us to process the regions
-        // reclaimed by cleanup.
-
-        // Now do the concurrent cleanup operation.
-        G1ConcPhase p(G1ConcurrentPhase::COMPLETE_CLEANUP, this);
-        _cm->complete_cleanup();
-
-        // Notify anyone who's waiting that there are no more free
-        // regions coming. We have to do this before we join the STS
-        // (in fact, we should not attempt to join the STS in the
-        // interval between finishing the cleanup pause and clearing
-        // the free_regions_coming flag) otherwise we might deadlock:
-        // a GC worker could be blocked waiting for the notification
-        // whereas this thread will be blocked for the pause to finish
-        // while it's trying to join the STS, which is conditional on
-        // the GC workers finishing.
-        g1h->reset_free_regions_coming();
-      }
-      guarantee(cm()->cleanup_list_is_empty(),
-                "at this point there should be no regions on the cleanup list");
-
-      // There is a tricky race before recording that the concurrent
-      // cleanup has completed and a potential Full GC starting around
-      // the same time. We want to make sure that the Full GC calls
-      // abort() on concurrent mark after
-      // record_concurrent_mark_cleanup_completed(), since abort() is
-      // the method that will reset the concurrent mark state. If we
-      // end up calling record_concurrent_mark_cleanup_completed()
-      // after abort() then we might incorrectly undo some of the work
-      // abort() did. Checking the has_aborted() flag after joining
-      // the STS allows the correct ordering of the two methods. There
-      // are two scenarios:
-      //
-      // a) If we reach here before the Full GC, the fact that we have
-      // joined the STS means that the Full GC cannot start until we
-      // leave the STS, so record_concurrent_mark_cleanup_completed()
-      // will complete before abort() is called.
-      //
-      // b) If we reach here during the Full GC, we'll be held up from
-      // joining the STS until the Full GC is done, which means that
-      // abort() will have completed and has_aborted() will return
-      // true to prevent us from calling
-      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
-      // not needed any more as the concurrent mark state has been
-      // already reset).
-      {
-        SuspendibleThreadSetJoiner sts_join;
-        if (!cm()->has_aborted()) {
-          g1_policy->record_concurrent_mark_cleanup_completed();
-        } else {
-          log_info(gc, marking)("Concurrent Mark Abort");
-        }
-      }
-
-      // We now want to allow clearing of the marking bitmap to be
-      // suspended by a collection pause.
-      // We may have aborted just before the remark. Do not bother clearing the
-      // bitmap then, as it has been done during mark abort.
-      if (!cm()->has_aborted()) {
-        G1ConcPhase p(G1ConcurrentPhase::CLEANUP_FOR_NEXT_MARK, this);
-        _cm->cleanup_for_next_mark();
-      } else {
-        assert(!G1VerifyBitmaps || _cm->next_mark_bitmap_is_clear(), "Next mark bitmap must be clear");
-      }
-    }
-
-    // Update the number of full collections that have been
-    // completed. This will also notify the FullGCCount_lock in case a
-    // Java thread is waiting for a full GC to happen (e.g., it
-    // called System.gc() with +ExplicitGCInvokesConcurrent).
-    {
-      SuspendibleThreadSetJoiner sts_join;
-      g1h->increment_old_marking_cycles_completed(true /* concurrent */);
-
-      cm()->concurrent_cycle_end();
-    }
-
-    cpmanager.set_phase(G1ConcurrentPhase::IDLE, cm()->has_aborted() /* force */);
-  }
-  _cm->root_regions()->cancel_scan();
-}
-
-void ConcurrentMarkThread::stop_service() {
-  MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
-  CGC_lock->notify_all();
-}
-
-void ConcurrentMarkThread::sleepBeforeNextCycle() {
-  // We join here because we don't want to do the "shouldConcurrentMark()"
-  // below while the world is otherwise stopped.
-  assert(!in_progress(), "should have been cleared");
-
-  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-  while (!started() && !should_terminate()) {
-    CGC_lock->wait(Mutex::_no_safepoint_check_flag);
-  }
-
-  if (started()) {
-    set_in_progress();
-  }
-}
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP
-#define SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP
-
-#include "gc/shared/concurrentGCPhaseManager.hpp"
-#include "gc/shared/concurrentGCThread.hpp"
-
-// The Concurrent Mark GC Thread triggers the parallel G1CMConcurrentMarkingTasks
-// as well as handling various marking cleanup.
-
-class G1ConcurrentMark;
-class G1Policy;
-
-class ConcurrentMarkThread: public ConcurrentGCThread {
-  friend class VMStructs;
-
-  double _vtime_start;  // Initial virtual time.
-  double _vtime_accum;  // Accumulated virtual time.
-  double _vtime_mark_accum;
-
-  G1ConcurrentMark*                _cm;
-
-  enum State {
-    Idle,
-    Started,
-    InProgress
-  };
-
-  volatile State _state;
-
-  // WhiteBox testing support.
-  ConcurrentGCPhaseManager::Stack _phase_manager_stack;
-
-  void sleepBeforeNextCycle();
-  // Delay marking to meet MMU.
-  void delay_to_keep_mmu(G1Policy* g1_policy, bool remark);
-  double mmu_sleep_time(G1Policy* g1_policy, bool remark);
-
-  void run_service();
-  void stop_service();
-
- public:
-  // Constructor
-  ConcurrentMarkThread(G1ConcurrentMark* cm);
-
-  // Total virtual time so far for this thread and concurrent marking tasks.
-  double vtime_accum();
-  // Marking virtual time so far for this thread and concurrent marking tasks.
-  double vtime_mark_accum();
-
-  G1ConcurrentMark* cm()   { return _cm; }
-
-  void set_idle()          { assert(_state != Started, "must not be starting a new cycle"); _state = Idle; }
-  bool idle()              { return _state == Idle; }
-  void set_started()       { assert(_state == Idle, "cycle in progress"); _state = Started; }
-  bool started()           { return _state == Started; }
-  void set_in_progress()   { assert(_state == Started, "must be starting a cycle"); _state = InProgress; }
-  bool in_progress()       { return _state == InProgress; }
-
-  // Returns true from the moment a marking cycle is
-  // initiated (during the initial-mark pause when started() is set)
-  // to the moment when the cycle completes (just after the next
-  // marking bitmap has been cleared and in_progress() is
-  // cleared). While during_cycle() is true we will not start another cycle
-  // so that cycles do not overlap. We cannot use just in_progress()
-  // as the CM thread might take some time to wake up before noticing
-  // that started() is set and set in_progress().
-  bool during_cycle()      { return !idle(); }
-
-  // WhiteBox testing support.
-  const char* const* concurrent_phases() const;
-  bool request_concurrent_phase(const char* phase);
-
-  ConcurrentGCPhaseManager::Stack* phase_manager_stack() {
-    return &_phase_manager_stack;
-  }
-};
-
-#endif // SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP
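
For reference, the state machine this deleted header encoded (transitions as
enforced by the asserts in the setters above):

    // Idle --set_started()--> Started --set_in_progress()--> InProgress
    //   ^                                                         |
    //   +---------------------- set_idle() -----------------------+
    //
    // during_cycle() == !idle(), deliberately covering the window in which
    // started() is already set but the CM thread has not yet woken up to
    // call set_in_progress().
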
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
-#define SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
-
-#include "gc/g1/concurrentMarkThread.hpp"
-#include "gc/g1/g1ConcurrentMark.hpp"
-
-// Total virtual time so far.
-inline double ConcurrentMarkThread::vtime_accum() {
-  return _vtime_accum + _cm->all_task_accum_vtime();
-}
-
-// Marking virtual time so far
-inline double ConcurrentMarkThread::vtime_mark_accum() {
-  return _vtime_mark_accum + _cm->all_task_accum_vtime();
-}
-
-#endif // SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,8 +27,10 @@
 #include "gc/g1/g1AllocRegion.inline.hpp"
 #include "gc/g1/g1EvacStats.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/g1/heapRegionType.hpp"
 #include "utilities/align.hpp"
 
 G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
@@ -72,13 +74,12 @@
       !(retained_region->top() == retained_region->end()) &&
       !retained_region->is_empty() &&
       !retained_region->is_humongous()) {
-    retained_region->record_timestamp();
     // The retained region was added to the old region set when it was
     // retired. We have to remove it now, since we don't allow regions
     // we allocate to in the region sets. We'll re-add it later, when
     // it's retired again.
     _g1h->old_set_remove(retained_region);
-    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
+    bool during_im = _g1h->collector_state()->in_initial_mark_gc();
     retained_region->note_start_of_copying(during_im);
     old->set(retained_region);
     _g1h->hr_printer()->reuse(retained_region);
@@ -342,6 +343,7 @@
   } else {
     hr->set_closed_archive();
   }
+  _g1h->g1_policy()->remset_tracker()->update_at_allocate(hr);
   _g1h->old_set_add(hr);
   _g1h->hr_printer()->alloc(hr);
   _allocated_regions.append(hr);
--- a/src/hotspot/share/gc/g1/g1Analytics.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1Analytics.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -166,16 +166,16 @@
   _cost_scan_hcc_seq->add(cost_scan_hcc);
 }
 
-void G1Analytics::report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young) {
-  if (last_gc_was_young) {
+void G1Analytics::report_cost_per_entry_ms(double cost_per_entry_ms, bool for_young_gc) {
+  if (for_young_gc) {
     _cost_per_entry_ms_seq->add(cost_per_entry_ms);
   } else {
     _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
   }
 }
 
-void G1Analytics::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young) {
-  if (last_gc_was_young) {
+void G1Analytics::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool for_young_gc) {
+  if (for_young_gc) {
     _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
   } else {
     _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
@@ -186,8 +186,8 @@
   _rs_length_diff_seq->add(rs_length_diff);
 }
 
-void G1Analytics::report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window) {
-  if (in_marking_window) {
+void G1Analytics::report_cost_per_byte_ms(double cost_per_byte_ms, bool mark_or_rebuild_in_progress) {
+  if (mark_or_rebuild_in_progress) {
     _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
   } else {
     _cost_per_byte_ms_seq->add(cost_per_byte_ms);
@@ -246,16 +246,16 @@
   }
 }
 
-size_t G1Analytics::predict_card_num(size_t rs_length, bool gcs_are_young) const {
-  if (gcs_are_young) {
+size_t G1Analytics::predict_card_num(size_t rs_length, bool for_young_gc) const {
+  if (for_young_gc) {
     return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
   } else {
     return (size_t) (rs_length * predict_mixed_cards_per_entry_ratio());
   }
 }
 
-double G1Analytics::predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const {
-  if (gcs_are_young) {
+double G1Analytics::predict_rs_scan_time_ms(size_t card_num, bool for_young_gc) const {
+  if (for_young_gc) {
     return card_num * get_new_prediction(_cost_per_entry_ms_seq);
   } else {
     return predict_mixed_rs_scan_time_ms(card_num);
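
The renamed parameters make the call sites self-describing; a hedged sketch of how
a policy consumer might combine the two predictors changed above:

    // Hypothetical use: estimate remembered-set scan cost for the next
    // young collection from a predicted remembered-set length.
    size_t cards   = analytics->predict_card_num(rs_length, true /* for_young_gc */);
    double scan_ms = analytics->predict_rs_scan_time_ms(cards, true /* for_young_gc */);
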
--- a/src/hotspot/share/gc/g1/g1Analytics.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1Analytics.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,10 +101,10 @@
   void report_alloc_rate_ms(double alloc_rate);
   void report_cost_per_card_ms(double cost_per_card_ms);
   void report_cost_scan_hcc(double cost_scan_hcc);
-  void report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young);
-  void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young);
+  void report_cost_per_entry_ms(double cost_per_entry_ms, bool for_young_gc);
+  void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool for_young_gc);
   void report_rs_length_diff(double rs_length_diff);
-  void report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window);
+  void report_cost_per_byte_ms(double cost_per_byte_ms, bool mark_or_rebuild_in_progress);
   void report_young_other_cost_per_region_ms(double other_cost_per_region_ms);
   void report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms);
   void report_constant_other_time_ms(double constant_other_time_ms);
@@ -126,9 +126,9 @@
 
   double predict_mixed_cards_per_entry_ratio() const;
 
-  size_t predict_card_num(size_t rs_length, bool gcs_are_young) const;
+  size_t predict_card_num(size_t rs_length, bool for_young_gc) const;
 
-  double predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const;
+  double predict_rs_scan_time_ms(size_t card_num, bool for_young_gc) const;
 
   double predict_mixed_rs_scan_time_ms(size_t card_num) const;
 
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -37,8 +38,42 @@
   return HeapRegion::max_region_size();
 }
 
-void G1Arguments::initialize_flags() {
-  GCArguments::initialize_flags();
+void G1Arguments::initialize_verification_types() {
+  if (strlen(VerifyGCType) > 0) {
+    const char delimiter[] = " ,\n";
+    size_t length = strlen(VerifyGCType);
+    char* type_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
+    strncpy(type_list, VerifyGCType, length + 1);
+    char* token = strtok(type_list, delimiter);
+    while (token != NULL) {
+      parse_verification_type(token);
+      token = strtok(NULL, delimiter);
+    }
+    FREE_C_HEAP_ARRAY(char, type_list);
+  }
+}
+
+void G1Arguments::parse_verification_type(const char* type) {
+  if (strcmp(type, "young-only") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyYoungOnly);
+  } else if (strcmp(type, "initial-mark") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyInitialMark);
+  } else if (strcmp(type, "mixed") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyMixed);
+  } else if (strcmp(type, "remark") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyRemark);
+  } else if (strcmp(type, "cleanup") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyCleanup);
+  } else if (strcmp(type, "full") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyFull);
+  } else {
+    log_warning(gc, verify)("VerifyGCType: '%s' is unknown. Available types are: "
+                            "young-only, initial-mark, mixed, remark, cleanup and full", type);
+  }
+}
+
+void G1Arguments::initialize() {
+  GCArguments::initialize();
   assert(UseG1GC, "Error");
   FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
   if (ParallelGCThreads == 0) {
@@ -100,12 +135,8 @@
     }
   }
 #endif
-}
 
-bool G1Arguments::parse_verification_type(const char* type) {
-  G1CollectedHeap::heap()->verifier()->parse_verification_type(type);
-  // Always return true because we want to parse all values.
-  return true;
+  initialize_verification_types();
 }
 
 CollectedHeap* G1Arguments::create_heap() {
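
Given the delimiter set " ,\n", the tokenizer accepts the flag in forms like the
ones below (hedged; shown for illustration only):

    // Command-line forms initialize_verification_types() handles:
    //   -XX:VerifyGCType=young-only
    //   -XX:VerifyGCType=young-only,initial-mark
    //   -XX:VerifyGCType="remark cleanup"
    // Unknown tokens hit the log_warning() branch instead of aborting startup.
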
--- a/src/hotspot/share/gc/g1/g1Arguments.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1Arguments.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -30,9 +31,14 @@
 class CollectedHeap;
 
 class G1Arguments : public GCArguments {
+  friend class G1HeapVerifierTest_parse_Test;
+
+private:
+  static void initialize_verification_types();
+  static void parse_verification_type(const char* type);
+
 public:
-  virtual void initialize_flags();
-  virtual bool parse_verification_type(const char* type);
+  virtual void initialize();
   virtual size_t conservative_max_heap_alignment();
   virtual CollectedHeap* create_heap();
 };
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -30,6 +30,8 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/satbMarkQueue.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.inline.hpp"
@@ -77,9 +79,9 @@
   if (!JavaThread::satb_mark_queue_set().is_active()) return;
   T* elem_ptr = dst;
   for (size_t i = 0; i < count; i++, elem_ptr++) {
-    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
-    if (!oopDesc::is_null(heap_oop)) {
-      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+    T heap_oop = RawAccess<>::oop_load(elem_ptr);
+    if (!CompressedOops::is_null(heap_oop)) {
+      enqueue(CompressedOops::decode_not_null(heap_oop));
     }
   }
 }
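
The pattern being migrated to here (raw load, null check, then decode) can be sketched outside HotSpot as follows. The CompressedOops model below is a simplification under assumed types; real decoding also applies a heap-base shift:

#include <cstdint>
#include <cstdio>

typedef uint32_t narrowOop;                  // compressed reference
typedef void*    oop;                        // full-width reference

static char heap_base[1024];                 // pretend heap

struct CompressedOops {
  static bool is_null(narrowOop v) { return v == 0; }
  static bool is_null(oop v)       { return v == nullptr; }
  static oop decode_not_null(narrowOop v) {
    return (oop)(heap_base + v);             // base-relative; shift omitted
  }
  static oop decode_not_null(oop v) { return v; }
};

static void enqueue(oop obj) { printf("enqueue %p\n", obj); }

// Works for both narrowOop* and oop* fields, as in the barrier code above.
template <typename T>
void enqueue_if_live(T* field) {
  T heap_oop = *field;                       // RawAccess<>::oop_load stand-in
  if (!CompressedOops::is_null(heap_oop)) {
    enqueue(CompressedOops::decode_not_null(heap_oop));
  }
}

int main() {
  narrowOop compressed = 16;
  oop       full       = heap_base + 32;
  enqueue_if_live(&compressed);
  enqueue_if_live(&full);
  return 0;
}
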
--- a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,7 +28,9 @@
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/shared/accessBarrierSupport.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 
 template <DecoratorSet decorators, typename T>
 inline void G1BarrierSet::write_ref_field_pre(T* field) {
@@ -38,8 +40,8 @@
   }
 
   T heap_oop = RawAccess<MO_VOLATILE>::oop_load(field);
-  if (!oopDesc::is_null(heap_oop)) {
-    enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+  if (!CompressedOops::is_null(heap_oop)) {
+    enqueue(CompressedOops::decode_not_null(heap_oop));
   }
 }
 
--- a/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,587 +0,0 @@
-/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ConcurrentMark.inline.hpp"
-#include "gc/g1/g1CardLiveData.inline.hpp"
-#include "gc/shared/suspendibleThreadSet.hpp"
-#include "gc/shared/workgroup.hpp"
-#include "logging/log.hpp"
-#include "memory/universe.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
-#include "utilities/align.hpp"
-#include "utilities/bitMap.inline.hpp"
-#include "utilities/debug.hpp"
-
-G1CardLiveData::G1CardLiveData() :
-  _max_capacity(0),
-  _cards_per_region(0),
-  _gc_timestamp_at_create(0),
-  _live_regions(NULL),
-  _live_regions_size_in_bits(0),
-  _live_cards(NULL),
-  _live_cards_size_in_bits(0) {
-}
-
-G1CardLiveData::~G1CardLiveData()  {
-  free_large_bitmap(_live_cards, _live_cards_size_in_bits);
-  free_large_bitmap(_live_regions, _live_regions_size_in_bits);
-}
-
-G1CardLiveData::bm_word_t* G1CardLiveData::allocate_large_bitmap(size_t size_in_bits) {
-  size_t size_in_words = BitMap::calc_size_in_words(size_in_bits);
-
-  bm_word_t* map = MmapArrayAllocator<bm_word_t>::allocate(size_in_words, mtGC);
-
-  return map;
-}
-
-void G1CardLiveData::free_large_bitmap(bm_word_t* bitmap, size_t size_in_bits) {
-  MmapArrayAllocator<bm_word_t>::free(bitmap, BitMap::calc_size_in_words(size_in_bits));
-}
-
-void G1CardLiveData::initialize(size_t max_capacity, uint num_max_regions) {
-  assert(max_capacity % num_max_regions == 0,
-         "Given capacity must be evenly divisible by region size.");
-  size_t region_size = max_capacity / num_max_regions;
-  assert(region_size % (G1CardTable::card_size * BitsPerWord) == 0,
-         "Region size must be evenly divisible by area covered by a single word.");
-  _max_capacity = max_capacity;
-  _cards_per_region = region_size / G1CardTable::card_size;
-
-  _live_regions_size_in_bits = live_region_bitmap_size_in_bits();
-  _live_regions = allocate_large_bitmap(_live_regions_size_in_bits);
-  _live_cards_size_in_bits = live_card_bitmap_size_in_bits();
-  _live_cards = allocate_large_bitmap(_live_cards_size_in_bits);
-}
-
-void G1CardLiveData::pretouch() {
-  live_cards_bm().pretouch();
-  live_regions_bm().pretouch();
-}
-
-size_t G1CardLiveData::live_region_bitmap_size_in_bits() const {
-  return _max_capacity / (_cards_per_region << G1CardTable::card_shift);
-}
-
-size_t G1CardLiveData::live_card_bitmap_size_in_bits() const {
-  return _max_capacity >> G1CardTable::card_shift;
-}
-
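
For a sense of scale, the two sizing functions above work out as follows. This is a sketch assuming G1's usual 512-byte cards (card_shift == 9) and 1 MB regions; both values are illustrative:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t card_shift       = 9;                           // 512-byte cards
  const uint64_t max_capacity     = 32ULL * 1024 * 1024 * 1024;  // 32 GB heap
  const uint64_t region_size      = 1ULL * 1024 * 1024;          // 1 MB regions
  const uint64_t cards_per_region = region_size >> card_shift;   // 2048

  const uint64_t live_card_bits   = max_capacity >> card_shift;
  const uint64_t live_region_bits = max_capacity / (cards_per_region << card_shift);

  printf("card bitmap:   %llu bits (%llu MB)\n",
         (unsigned long long)live_card_bits,
         (unsigned long long)(live_card_bits / 8 / 1024 / 1024));  // 8 MB
  printf("region bitmap: %llu bits\n",
         (unsigned long long)live_region_bits);                    // 32768
  return 0;
}
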
-// Helper class that provides functionality to generate the Live Data Count
-// information.
-class G1CardLiveDataHelper {
-private:
-  BitMapView _region_bm;
-  BitMapView _card_bm;
-
-  // The card number of the bottom of the G1 heap.
-  // Used in biasing indices into accounting card bitmaps.
-  BitMap::idx_t _heap_card_bias;
-
-  // Utility routine to set an exclusive range of bits on the given
-  // bitmap, optimized for very small ranges.
-  // There must be at least one bit to set.
-  void set_card_bitmap_range(BitMap::idx_t start_idx,
-                             BitMap::idx_t end_idx) {
-
-    // Set the exclusive bit range [start_idx, end_idx).
-    assert((end_idx - start_idx) > 0, "at least one bit");
-
-    // For small ranges use a simple loop; otherwise use set_range.
-    // The range is made up of the cards that are spanned by an object/mem
-    // region, so 8 cards allow object sizes of up to 4K to be handled
-    // using the loop.
-    if ((end_idx - start_idx) <= 8) {
-      for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
-        _card_bm.set_bit(i);
-      }
-    } else {
-      _card_bm.set_range(start_idx, end_idx);
-    }
-  }
-
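
The 8-card threshold above can be illustrated with std::vector<bool> standing in for HotSpot's BitMap (a sketch; BitMap::set_range actually works a word at a time):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

static std::vector<bool> card_bm(4096, false);

void set_card_bitmap_range(size_t start_idx, size_t end_idx) {
  if ((end_idx - start_idx) <= 8) {
    // Typical small object: a handful of bits, the loop is cheapest.
    for (size_t i = start_idx; i < end_idx; i++) {
      card_bm[i] = true;
    }
  } else {
    // Large range: bulk path (stand-in for BitMap::set_range).
    std::fill(card_bm.begin() + start_idx, card_bm.begin() + end_idx, true);
  }
}

int main() {
  set_card_bitmap_range(3, 7);      // takes the loop path
  set_card_bitmap_range(100, 400);  // takes the bulk path
  printf("bits set: %zu\n",
         (size_t)std::count(card_bm.begin(), card_bm.end(), true));  // 304
  return 0;
}
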
-  // We cache the last mark set. This avoids setting the same bit multiple times.
-  // This is particularly interesting for dense bitmaps, as this avoids doing
-  // lots of work most of the time.
-  BitMap::idx_t _last_marked_bit_idx;
-
-  void clear_card_bitmap_range(HeapWord* start, HeapWord* end) {
-    BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
-    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
-
-    _card_bm.clear_range(start_idx, end_idx);
-  }
-
-  // Mark the card liveness bitmap for the object spanning from start to end.
-  void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
-    BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
-    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
-
-    assert((end_idx - start_idx) > 0, "Trying to mark zero sized range.");
-
-    if (start_idx == _last_marked_bit_idx) {
-      start_idx++;
-    }
-    if (start_idx == end_idx) {
-      return;
-    }
-
-    // Set the bits in the card bitmap for the cards spanned by this object.
-    set_card_bitmap_range(start_idx, end_idx);
-    _last_marked_bit_idx = end_idx - 1;
-  }
-
-  void reset_mark_cache() {
-    _last_marked_bit_idx = (BitMap::idx_t)-1;
-  }
-
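
The effect of this one-entry cache is easiest to see with plain indices (a sketch; BitMap::idx_t is modeled as size_t and the bit setting is replaced by a print):

#include <cstddef>
#include <cstdio>

static size_t last_marked_bit_idx = (size_t)-1;

void mark_card_bitmap_range(size_t start_idx, size_t end_idx) {
  if (start_idx == last_marked_bit_idx) {
    start_idx++;                 // first card already set by previous object
  }
  if (start_idx == end_idx) {
    return;                      // dense case: whole range already covered
  }
  printf("set bits [%zu, %zu)\n", start_idx, end_idx);
  last_marked_bit_idx = end_idx - 1;
}

int main() {
  mark_card_bitmap_range(4, 6);  // sets [4, 6)
  mark_card_bitmap_range(5, 6);  // same card as the last mark: no work at all
  mark_card_bitmap_range(5, 8);  // only [6, 8) still needs setting
  return 0;
}
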
-public:
-  // Returns the index in the per-card liveness count bitmap
-  // for the given address
-  inline BitMap::idx_t card_live_bitmap_index_for(HeapWord* addr) {
-    // Below, the term "card num" means the result of shifting an address
-    // by the card shift -- address 0 corresponds to card number 0.  One
-    // must subtract the card num of the bottom of the heap to obtain a
-    // card table index.
-    BitMap::idx_t card_num = uintptr_t(addr) >> G1CardTable::card_shift;
-    return card_num - _heap_card_bias;
-  }
-
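
A worked example of the card biasing above (addresses are made up, 512-byte cards assumed):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t card_shift     = 9;                  // 512-byte cards
  const uintptr_t heap_bottom    = 0x10000000;         // card-aligned base
  const uintptr_t heap_card_bias = heap_bottom >> card_shift;

  // An address 100 bytes into the sixth card of the heap...
  uintptr_t addr = heap_bottom + 5 * 512 + 100;
  // ...lands on bitmap index 5 once the heap-bottom card num is subtracted.
  uintptr_t idx = (addr >> card_shift) - heap_card_bias;
  printf("bitmap index = %lu\n", (unsigned long)idx);  // prints 5
  return 0;
}
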
-  // Takes a region that's not empty (i.e., it has at least one
-  // live object in it) and sets its corresponding bit on the region
-  // bitmap to 1.
-  void set_bit_for_region(HeapRegion* hr) {
-    _region_bm.par_set_bit(hr->hrm_index());
-  }
-
-  void reset_live_data(HeapRegion* hr) {
-    clear_card_bitmap_range(hr->next_top_at_mark_start(), hr->end());
-  }
-
-  // Mark the range of bits covered by allocations done since the last marking
-  // in the given heap region, i.e. from NTAMS to top of the given region.
-  // Returns whether there has been any allocation in this region since the last marking.
-  bool mark_allocated_since_marking(HeapRegion* hr) {
-    reset_mark_cache();
-
-    HeapWord* ntams = hr->next_top_at_mark_start();
-    HeapWord* top   = hr->top();
-
-    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
-
-    // Mark the allocated-since-marking portion...
-    if (ntams < top) {
-      mark_card_bitmap_range(ntams, top);
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  // Mark the range of bits covered by live objects on the mark bitmap between
-  // bottom and NTAMS of the given region.
-  // Returns the number of live bytes marked within that area for the given
-  // heap region.
-  size_t mark_marked_during_marking(G1CMBitMap* mark_bitmap, HeapRegion* hr) {
-    reset_mark_cache();
-
-    size_t marked_bytes = 0;
-
-    HeapWord* ntams = hr->next_top_at_mark_start();
-    HeapWord* start = hr->bottom();
-
-    if (ntams <= start) {
-      // Skip empty regions.
-      return 0;
-    }
-    if (hr->is_humongous()) {
-      HeapRegion* start_region = hr->humongous_start_region();
-      if (mark_bitmap->is_marked(start_region->bottom())) {
-        mark_card_bitmap_range(start, hr->top());
-        return pointer_delta(hr->top(), start, 1);
-      } else {
-        // Humongous start object was actually dead.
-        return 0;
-      }
-    }
-
-    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
-           "Preconditions not met - "
-           "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
-           p2i(start), p2i(ntams), p2i(hr->end()));
-
-    // Find the first marked object at or after "start".
-    start = mark_bitmap->get_next_marked_addr(start, ntams);
-    while (start < ntams) {
-      oop obj = oop(start);
-      size_t obj_size = obj->size();
-      HeapWord* obj_end = start + obj_size;
-
-      assert(obj_end <= hr->end(), "Humongous objects must have been handled elsewhere.");
-
-      mark_card_bitmap_range(start, obj_end);
-
-      // Add the size of this object to the number of marked bytes.
-      marked_bytes += obj_size * HeapWordSize;
-
-      // Find the next marked object after this one.
-      start = mark_bitmap->get_next_marked_addr(obj_end, ntams);
-    }
-
-    return marked_bytes;
-  }
-
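
The loop above hops from marked object to marked object below ntams, accumulating sizes. A standalone sketch follows, with plain arrays standing in for the mark bitmap and a HeapWordSize of 8 assumed:

#include <cstdio>

const int NTAMS = 16;                 // marking boundary, in words
bool marked[NTAMS]   = {false};       // stand-in mark bitmap
int  obj_size[NTAMS] = {0};           // object sizes in words, at object starts

int get_next_marked_addr(int from, int limit) {
  while (from < limit && !marked[from]) from++;
  return from;
}

int main() {
  marked[2] = true; obj_size[2] = 3;  // live object spanning words [2, 5)
  marked[9] = true; obj_size[9] = 4;  // live object spanning words [9, 13)

  int marked_bytes = 0;
  int start = get_next_marked_addr(0, NTAMS);
  while (start < NTAMS) {
    int obj_end = start + obj_size[start];
    // mark_card_bitmap_range(start, obj_end) would happen here.
    marked_bytes += obj_size[start] * 8;          // HeapWordSize == 8 assumed
    start = get_next_marked_addr(obj_end, NTAMS);
  }
  printf("marked bytes: %d\n", marked_bytes);     // 56
  return 0;
}
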
-  G1CardLiveDataHelper(G1CardLiveData* live_data, HeapWord* base_address) :
-    _region_bm(live_data->live_regions_bm()),
-    _card_bm(live_data->live_cards_bm()) {
-    // Calculate the card number for the bottom of the heap. Used
-    // in biasing indexes into the accounting card bitmaps.
-    _heap_card_bias =
-      uintptr_t(base_address) >> G1CardTable::card_shift;
-  }
-};
-
-class G1CreateCardLiveDataTask: public AbstractGangTask {
-  // Aggregate the counting data that was constructed concurrently
-  // with marking.
-  class G1CreateLiveDataClosure : public HeapRegionClosure {
-    G1CardLiveDataHelper _helper;
-
-    G1CMBitMap* _mark_bitmap;
-
-    G1ConcurrentMark* _cm;
-  public:
-    G1CreateLiveDataClosure(G1CollectedHeap* g1h,
-                            G1ConcurrentMark* cm,
-                            G1CMBitMap* mark_bitmap,
-                            G1CardLiveData* live_data) :
-      HeapRegionClosure(),
-      _helper(live_data, g1h->reserved_region().start()),
-      _mark_bitmap(mark_bitmap),
-      _cm(cm) { }
-
-    bool do_heap_region(HeapRegion* hr) {
-      size_t marked_bytes = _helper.mark_marked_during_marking(_mark_bitmap, hr);
-      if (marked_bytes > 0) {
-        hr->add_to_marked_bytes(marked_bytes);
-      }
-
-      return (_cm->do_yield_check() && _cm->has_aborted());
-    }
-  };
-
-  G1ConcurrentMark* _cm;
-  G1CardLiveData* _live_data;
-  HeapRegionClaimer _hr_claimer;
-
-public:
-  G1CreateCardLiveDataTask(G1CMBitMap* bitmap,
-                           G1CardLiveData* live_data,
-                           uint n_workers) :
-      AbstractGangTask("G1 Create Live Data"),
-      _live_data(live_data),
-      _hr_claimer(n_workers) {
-  }
-
-  void work(uint worker_id) {
-    SuspendibleThreadSetJoiner sts_join;
-
-    G1CollectedHeap* g1h = G1CollectedHeap::heap();
-    G1ConcurrentMark* cm = g1h->concurrent_mark();
-    G1CreateLiveDataClosure cl(g1h, cm, cm->next_mark_bitmap(), _live_data);
-    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
-  }
-};
-
-void G1CardLiveData::create(WorkGang* workers, G1CMBitMap* mark_bitmap) {
-  _gc_timestamp_at_create = G1CollectedHeap::heap()->get_gc_time_stamp();
-
-  uint n_workers = workers->active_workers();
-
-  G1CreateCardLiveDataTask cl(mark_bitmap,
-                              this,
-                              n_workers);
-  workers->run_task(&cl);
-}
-
-class G1FinalizeCardLiveDataTask: public AbstractGangTask {
-  // Finalizes the liveness counting data.
-  // Sets the bits corresponding to the interval [NTAMS, top]
-  // (which contains the implicitly live objects) in the
-  // card liveness bitmap. Also sets the bit for each region
-  // containing live data, in the region liveness bitmap.
-  class G1FinalizeCardLiveDataClosure: public HeapRegionClosure {
-  private:
-    G1CardLiveDataHelper _helper;
-
-    uint _gc_timestamp_at_create;
-
-    bool has_been_reclaimed(HeapRegion* hr) const {
-      return hr->get_gc_time_stamp() > _gc_timestamp_at_create;
-    }
-  public:
-    G1FinalizeCardLiveDataClosure(G1CollectedHeap* g1h,
-                                  G1CMBitMap* bitmap,
-                                  G1CardLiveData* live_data) :
-      HeapRegionClosure(),
-      _helper(live_data, g1h->reserved_region().start()),
-      _gc_timestamp_at_create(live_data->gc_timestamp_at_create()) { }
-
-    bool do_heap_region(HeapRegion* hr) {
-      if (has_been_reclaimed(hr)) {
-        _helper.reset_live_data(hr);
-      }
-      bool allocated_since_marking = _helper.mark_allocated_since_marking(hr);
-      if (allocated_since_marking || hr->next_marked_bytes() > 0) {
-        _helper.set_bit_for_region(hr);
-      }
-      return false;
-    }
-  };
-
-  G1CMBitMap* _bitmap;
-
-  G1CardLiveData* _live_data;
-
-  HeapRegionClaimer _hr_claimer;
-
-public:
-  G1FinalizeCardLiveDataTask(G1CMBitMap* bitmap, G1CardLiveData* live_data, uint n_workers) :
-    AbstractGangTask("G1 Finalize Card Live Data"),
-    _bitmap(bitmap),
-    _live_data(live_data),
-    _hr_claimer(n_workers) {
-  }
-
-  void work(uint worker_id) {
-    G1FinalizeCardLiveDataClosure cl(G1CollectedHeap::heap(), _bitmap, _live_data);
-
-    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
-  }
-};
-
-void G1CardLiveData::finalize(WorkGang* workers, G1CMBitMap* mark_bitmap) {
-  // Finalize the live data.
-  G1FinalizeCardLiveDataTask cl(mark_bitmap,
-                                this,
-                                workers->active_workers());
-  workers->run_task(&cl);
-}
-
-class G1ClearCardLiveDataTask : public AbstractGangTask {
-  BitMapView _bitmap;
-  size_t     _num_chunks;
-  size_t     _cur_chunk;
-public:
-  G1ClearCardLiveDataTask(const BitMapView& bitmap, size_t num_tasks) :
-    AbstractGangTask("G1 Clear Card Live Data"),
-    _bitmap(bitmap),
-    _num_chunks(num_tasks),
-    _cur_chunk(0) {
-  }
-
-  static size_t chunk_size() { return M; }
-
-  virtual void work(uint worker_id) {
-    while (true) {
-      size_t to_process = Atomic::add(1u, &_cur_chunk) - 1;
-      if (to_process >= _num_chunks) {
-        break;
-      }
-
-      BitMap::idx_t start = M * BitsPerByte * to_process;
-      BitMap::idx_t end = MIN2(start + M * BitsPerByte, _bitmap.size());
-      _bitmap.clear_range(start, end);
-    }
-  }
-};
-
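
The claiming loop above is a lock-free chunk dispenser; with std::atomic standing in for HotSpot's Atomic::add it looks like this (a sketch, sizes shrunk):

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic<size_t> cur_chunk(0);
static const size_t num_chunks = 16;

void work(unsigned worker_id) {
  while (true) {
    // Equivalent of Atomic::add(1u, &_cur_chunk) - 1: claim the next chunk.
    size_t to_process = cur_chunk.fetch_add(1);
    if (to_process >= num_chunks) {
      break;
    }
    printf("worker %u clears chunk %zu\n", worker_id, to_process);
  }
}

int main() {
  std::vector<std::thread> workers;
  for (unsigned i = 0; i < 4; i++) {
    workers.emplace_back(work, i);
  }
  for (std::thread& t : workers) {
    t.join();
  }
  return 0;
}
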
-void G1CardLiveData::clear(WorkGang* workers) {
-  guarantee(Universe::is_fully_initialized(), "Should not call this during initialization.");
-
-  size_t const num_chunks = align_up(live_cards_bm().size_in_bytes(), G1ClearCardLiveDataTask::chunk_size()) / G1ClearCardLiveDataTask::chunk_size();
-  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
-
-  G1ClearCardLiveDataTask cl(live_cards_bm(), num_chunks);
-
-  log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
-  workers->run_task(&cl, num_workers);
-
-  // The region live bitmap is always very small, even for huge heaps. Clear
-  // it directly.
-  live_regions_bm().clear();
-}
-
-class G1VerifyCardLiveDataTask: public AbstractGangTask {
-  // Heap region closure used for verifying the live count data
-  // that was created concurrently and finalized during
-  // the remark pause. This closure is applied to the heap
-  // regions during the STW cleanup pause.
-  class G1VerifyCardLiveDataClosure: public HeapRegionClosure {
-  private:
-    G1CollectedHeap* _g1h;
-    G1CMBitMap* _mark_bitmap;
-    G1CardLiveDataHelper _helper;
-
-    G1CardLiveData* _act_live_data;
-
-    G1CardLiveData* _exp_live_data;
-
-    int _failures;
-
-    // Completely recreates the live data count for the given heap region and
-    // returns the number of bytes marked.
-    size_t create_live_data_count(HeapRegion* hr) {
-      size_t bytes_marked = _helper.mark_marked_during_marking(_mark_bitmap, hr);
-      bool allocated_since_marking = _helper.mark_allocated_since_marking(hr);
-      if (allocated_since_marking || bytes_marked > 0) {
-        _helper.set_bit_for_region(hr);
-      }
-      return bytes_marked;
-    }
-  public:
-    G1VerifyCardLiveDataClosure(G1CollectedHeap* g1h,
-                                G1CMBitMap* mark_bitmap,
-                                G1CardLiveData* act_live_data,
-                                G1CardLiveData* exp_live_data) :
-      _g1h(g1h),
-      _mark_bitmap(mark_bitmap),
-      _helper(exp_live_data, g1h->reserved_region().start()),
-      _act_live_data(act_live_data),
-      _exp_live_data(exp_live_data),
-      _failures(0) { }
-
-    int failures() const { return _failures; }
-
-    bool do_heap_region(HeapRegion* hr) {
-      int failures = 0;
-
-      // Walk the marking bitmap for this region and set the corresponding bits
-      // in the expected region and card bitmaps.
-      size_t exp_marked_bytes = create_live_data_count(hr);
-      size_t act_marked_bytes = hr->next_marked_bytes();
-      // Verify the marked bytes for this region.
-
-      if (exp_marked_bytes != act_marked_bytes) {
-        log_error(gc)("Expected marked bytes " SIZE_FORMAT " != actual marked bytes " SIZE_FORMAT " in region %u", exp_marked_bytes, act_marked_bytes, hr->hrm_index());
-        failures += 1;
-      } else if (exp_marked_bytes > HeapRegion::GrainBytes) {
-        log_error(gc)("Expected marked bytes " SIZE_FORMAT " larger than possible " SIZE_FORMAT " in region %u", exp_marked_bytes, HeapRegion::GrainBytes, hr->hrm_index());
-        failures += 1;
-      }
-
-      // Verify the bit, for this region, in the actual and expected
-      // (which was just calculated) region bit maps.
-      uint index = hr->hrm_index();
-
-      bool expected = _exp_live_data->is_region_live(index);
-      bool actual = _act_live_data->is_region_live(index);
-      if (expected != actual) {
-        log_error(gc)("Expected liveness %d not equal actual %d in region %u", expected, actual, hr->hrm_index());
-        failures += 1;
-      }
-
-      // Verify that the card bit maps for the cards spanned by the current
-      // region match.
-      BitMap::idx_t start_idx = _helper.card_live_bitmap_index_for(hr->bottom());
-      BitMap::idx_t end_idx = _helper.card_live_bitmap_index_for(hr->top());
-
-      for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
-        expected = _exp_live_data->is_card_live_at(i);
-        actual = _act_live_data->is_card_live_at(i);
-
-        if (expected != actual) {
-          log_error(gc)("Expected card liveness %d not equal actual card liveness %d at card " SIZE_FORMAT " in region %u", expected, actual, i, hr->hrm_index());
-          failures += 1;
-        }
-      }
-
-      _failures += failures;
-
-      // We could stop iteration over the heap when we
-      // find the first violating region by returning true.
-      return false;
-    }
-  };
-protected:
-  G1CollectedHeap* _g1h;
-  G1CMBitMap* _mark_bitmap;
-
-  G1CardLiveData* _act_live_data;
-
-  G1CardLiveData _exp_live_data;
-
-  int  _failures;
-
-  HeapRegionClaimer _hr_claimer;
-
-public:
-  G1VerifyCardLiveDataTask(G1CMBitMap* bitmap,
-                           G1CardLiveData* act_live_data,
-                           uint n_workers)
-  : AbstractGangTask("G1 Verify Card Live Data"),
-    _g1h(G1CollectedHeap::heap()),
-    _mark_bitmap(bitmap),
-    _act_live_data(act_live_data),
-    _exp_live_data(),
-    _failures(0),
-    _hr_claimer(n_workers) {
-    assert(VerifyDuringGC, "don't call this otherwise");
-    _exp_live_data.initialize(_g1h->max_capacity(), _g1h->max_regions());
-  }
-
-  void work(uint worker_id) {
-    G1VerifyCardLiveDataClosure cl(_g1h,
-                                   _mark_bitmap,
-                                   _act_live_data,
-                                   &_exp_live_data);
-    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
-
-    Atomic::add(cl.failures(), &_failures);
-  }
-
-  int failures() const { return _failures; }
-};
-
-void G1CardLiveData::verify(WorkGang* workers, G1CMBitMap* actual_bitmap) {
-    ResourceMark rm;
-
-    G1VerifyCardLiveDataTask cl(actual_bitmap,
-                                this,
-                                workers->active_workers());
-    workers->run_task(&cl);
-
-    guarantee(cl.failures() == 0, "Unexpected accounting failures");
-}
-
-#ifndef PRODUCT
-void G1CardLiveData::verify_is_clear() {
-  assert(live_cards_bm().count_one_bits() == 0, "Live cards bitmap must be clear.");
-  assert(live_regions_bm().count_one_bits() == 0, "Live regions bitmap must be clear.");
-}
-#endif
--- a/src/hotspot/share/gc/g1/g1CardLiveData.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1CARDLIVEDATA_HPP
-#define SHARE_VM_GC_G1_G1CARDLIVEDATA_HPP
-
-#include "gc/g1/g1CollectedHeap.hpp"
-#include "utilities/bitMap.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-class G1CollectedHeap;
-class G1CMBitMap;
-class WorkGang;
-
-// Information about object liveness on the Java heap on a "card" basis.
-// Can be used for various purposes, such as a remembered set for completely
-// coarsened remembered sets, scrubbing remembered sets, or estimating liveness.
-// This information is created as part of the concurrent marking cycle.
-class G1CardLiveData {
-  friend class G1CardLiveDataHelper;
-  friend class G1VerifyCardLiveDataTask;
-private:
-  typedef BitMap::bm_word_t bm_word_t;
-  // Store some additional information about the covered area to be able to test.
-  size_t _max_capacity;
-  size_t _cards_per_region;
-
-  // Regions may be reclaimed while concurrently creating live data (e.g. due to humongous
-  // eager reclaim). This results in wrong live data for these regions at the end.
-  // So we need to somehow detect these regions, and during live data finalization completely
-  // recreate their information.
-  // This _gc_timestamp_at_create tracks the global timestamp when live data creation
-  // has started. Any regions with a higher time stamp have been cleared after that
-  // point in time, and need re-finalization.
-  // Unsynchronized access to this variable is okay, since this value is only set during a
-  // concurrent phase, and read only at the Cleanup safepoint. I.e. there is always
-  // full memory synchronization in between.
-  uint _gc_timestamp_at_create;
-  // The per-card liveness bitmap.
-  bm_word_t* _live_cards;
-  size_t _live_cards_size_in_bits;
-  // The per-region liveness bitmap.
-  bm_word_t* _live_regions;
-  size_t _live_regions_size_in_bits;
-  // The bits in this bitmap contain for every card whether it contains
-  // at least part of at least one live object.
-  BitMapView live_cards_bm() const { return BitMapView(_live_cards, _live_cards_size_in_bits); }
-  // The bits in this bitmap indicate that a given region contains some live objects.
-  BitMapView live_regions_bm() const { return BitMapView(_live_regions, _live_regions_size_in_bits); }
-
-  // Allocate a "large" bitmap from virtual memory with the given size in bits.
-  bm_word_t* allocate_large_bitmap(size_t size_in_bits);
-  void free_large_bitmap(bm_word_t* map, size_t size_in_bits);
-
-  inline BitMapView live_card_bitmap(uint region);
-
-  inline bool is_card_live_at(BitMap::idx_t idx) const;
-
-  size_t live_region_bitmap_size_in_bits() const;
-  size_t live_card_bitmap_size_in_bits() const;
-public:
-  uint gc_timestamp_at_create() const { return _gc_timestamp_at_create; }
-
-  inline bool is_region_live(uint region) const;
-
-  inline void remove_nonlive_cards(uint region, BitMap* bm);
-  inline void remove_nonlive_regions(BitMap* bm);
-
-  G1CardLiveData();
-  ~G1CardLiveData();
-
-  void initialize(size_t max_capacity, uint num_max_regions);
-  void pretouch();
-
-  // Create the initial liveness data based on the marking result from the bottom
-  // to the ntams of every region in the heap and the marks in the given bitmap.
-  void create(WorkGang* workers, G1CMBitMap* mark_bitmap);
-  // Finalize the liveness data.
-  void finalize(WorkGang* workers, G1CMBitMap* mark_bitmap);
-
-  // Verify that the liveness count data created concurrently matches one created
-  // during this safepoint.
-  void verify(WorkGang* workers, G1CMBitMap* actual_bitmap);
-  // Clear all data structures, prepare for next processing.
-  void clear(WorkGang* workers);
-
-  void verify_is_clear() PRODUCT_RETURN;
-};
-
-#endif /* SHARE_VM_GC_G1_G1CARDLIVEDATA_HPP */
-
--- a/src/hotspot/share/gc/g1/g1CardLiveData.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1CARDLIVEDATA_INLINE_HPP
-#define SHARE_VM_GC_G1_G1CARDLIVEDATA_INLINE_HPP
-
-#include "gc/g1/g1CardLiveData.hpp"
-#include "utilities/bitMap.inline.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-inline BitMapView G1CardLiveData::live_card_bitmap(uint region) {
-  return BitMapView(_live_cards + ((size_t)region * _cards_per_region >> LogBitsPerWord), _cards_per_region);
-}
-
-inline bool G1CardLiveData::is_card_live_at(BitMap::idx_t idx) const {
-  return live_cards_bm().at(idx);
-}
-
-inline bool G1CardLiveData::is_region_live(uint region) const {
-  return live_regions_bm().at(region);
-}
-
-inline void G1CardLiveData::remove_nonlive_cards(uint region, BitMap* bm) {
-  bm->set_intersection(live_card_bitmap(region));
-}
-
-inline void G1CardLiveData::remove_nonlive_regions(BitMap* bm) {
-  bm->set_intersection(live_regions_bm());
-}
-
-#endif /* SHARE_VM_GC_G1_G1CARDLIVEDATA_INLINE_HPP */
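
The two remove_nonlive_* helpers above are plain bitmap intersections; with std::bitset in place of HotSpot's BitMap the operation is just &= (a sketch):

#include <bitset>
#include <iostream>

int main() {
  std::bitset<8> candidate("11011010"); // cards some subsystem cares about
  std::bitset<8> live     ("01111000"); // cards with live data
  candidate &= live;                    // BitMap::set_intersection equivalent
  std::cout << candidate << '\n';       // 01011000
  return 0;
}
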
--- a/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,14 +28,16 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 template <typename T>
 void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
   _work->do_oop(p);
-  T oop_or_narrowoop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(oop_or_narrowoop)) {
-    oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
+  T oop_or_narrowoop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(oop_or_narrowoop)) {
+    oop o = CompressedOops::decode_not_null(oop_or_narrowoop);
     HeapRegion* hr = _g1h->heap_region_containing(o);
     assert(!_g1h->is_in_cset(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
     hr->add_strong_code_root(_nm);
--- a/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -30,6 +30,7 @@
 #include "gc/g1/heapRegion.hpp"
 #include "memory/heap.hpp"
 #include "memory/iterator.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/stack.inline.hpp"
@@ -274,7 +275,7 @@
 
     template <typename T>
     void do_oop_work(T* p) {
-      if (_hr->is_in(oopDesc::load_decode_heap_oop(p))) {
+      if (_hr->is_in(RawAccess<>::oop_load(p))) {
         _points_into = true;
       }
     }
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,7 +29,6 @@
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "gc/g1/bufferingOopClosure.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1Allocator.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
@@ -37,6 +36,7 @@
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
 #include "gc/g1/g1ConcurrentRefineThread.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/g1/g1EvacStats.inline.hpp"
 #include "gc/g1/g1FullCollector.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@@ -62,7 +62,7 @@
 #include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
@@ -77,6 +77,8 @@
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/resolvedMethodTable.hpp"
 #include "runtime/atomic.hpp"
@@ -154,63 +156,13 @@
 
 // Private methods.
 
-HeapRegion*
-G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
-  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
-    if (!_secondary_free_list.is_empty()) {
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                      "secondary_free_list has %u entries",
-                                      _secondary_free_list.length());
-      // It looks as if there are free regions available on the
-      // secondary_free_list. Let's move them to the free_list and try
-      // again to allocate from it.
-      append_secondary_free_list();
-
-      assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
-             "empty we should have moved at least one entry to the free_list");
-      HeapRegion* res = _hrm.allocate_free_region(is_old);
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                      "allocated " HR_FORMAT " from secondary_free_list",
-                                      HR_FORMAT_PARAMS(res));
-      return res;
-    }
-
-    // Wait here until we get notified either when (a) there are no
-    // more free regions coming or (b) some regions have been moved on
-    // the secondary_free_list.
-    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                  "could not allocate from secondary_free_list");
-  return NULL;
-}
-
 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
          "the only time we use this to allocate a humongous region is "
          "when we are allocating a single humongous region");
 
-  HeapRegion* res;
-  if (G1StressConcRegionFreeing) {
-    if (!_secondary_free_list.is_empty()) {
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                      "forced to look at the secondary_free_list");
-      res = new_region_try_secondary_free_list(is_old);
-      if (res != NULL) {
-        return res;
-      }
-    }
-  }
-
-  res = _hrm.allocate_free_region(is_old);
-
-  if (res == NULL) {
-    log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                    "res == NULL, trying the secondary_free_list");
-    res = new_region_try_secondary_free_list(is_old);
-  }
+  HeapRegion* res = _hrm.allocate_free_region(is_old);
+
   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
     // Currently, only attempts to allocate GC alloc regions set
     // do_expand to true. So, we should only reach here during a
@@ -301,12 +253,14 @@
   // that there is a single object that starts at the bottom of the
   // first region.
   first_hr->set_starts_humongous(obj_top, word_fill_size);
+  _g1_policy->remset_tracker()->update_at_allocate(first_hr);
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
   for (uint i = first + 1; i <= last; ++i) {
     hr = region_at(i);
     hr->set_continues_humongous(first_hr);
+    _g1_policy->remset_tracker()->update_at_allocate(hr);
   }
 
   // Up to this point no concurrent thread would have been able to
@@ -376,17 +330,6 @@
       first = hr->hrm_index();
     }
   } else {
-    // We can't allocate humongous regions spanning more than one region while
-    // cleanupComplete() is running, since some of the regions we find to be
-    // empty might not yet be added to the free list. It is not straightforward
-    // to know in which list they are on so that we can remove them. We only
-    // need to do this if we need to allocate more than one region to satisfy the
-    // current humongous allocation request. If we are only allocating one region
-    // we use the one-region region allocation code (see above), that already
-    // potentially waits for regions from the secondary free list.
-    wait_while_free_regions_coming();
-    append_secondary_free_list_if_not_empty_with_lock();
-
     // Policy: Try only empty regions (i.e. already committed first). Maybe we
     // are lucky enough to find some.
     first = _hrm.find_contiguous_only_empty(obj_regions);
@@ -1022,11 +965,6 @@
 }
 
 void G1CollectedHeap::abort_concurrent_cycle() {
-  // Note: When we have a more flexible GC logging framework that
-  // allows us to add optional attributes to a GC log record we
-  // could consider timing and reporting how long we wait in the
-  // following two methods.
-  wait_while_free_regions_coming();
   // If we start the compaction before the CM threads finish
   // scanning the root regions we might trip them over as we'll
   // be moving objects / updating references. So let's wait until
@@ -1034,7 +972,6 @@
   // early.
   _cm->root_regions()->abort();
   _cm->root_regions()->wait_until_scan_finished();
-  append_secondary_free_list_if_not_empty_with_lock();
 
   // Disable discovery and empty the discovered lists
   // for the CM ref processor.
@@ -1044,7 +981,7 @@
 
   // Abandon current iterations of concurrent marking and concurrent
   // refinement, if any are in progress.
-  concurrent_mark()->abort();
+  concurrent_mark()->concurrent_cycle_abort();
 }
 
 void G1CollectedHeap::prepare_heap_for_full_collection() {
@@ -1060,7 +997,6 @@
   abandon_collection_set(collection_set());
 
   tear_down_region_sets(false /* free_list_only */);
-  collector_state()->set_gcs_are_young(true);
 }
 
 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
@@ -1105,7 +1041,6 @@
 }
 
 void G1CollectedHeap::verify_after_full_collection() {
-  check_gc_time_stamps();
   _hrm.verify_optional();
   _verifier->verify_region_sets_optional();
   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
@@ -1472,14 +1407,11 @@
   _cr(NULL),
   _g1mm(NULL),
   _preserved_marks_set(true /* in_c_heap */),
-  _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
   _humongous_reclaim_candidates(),
   _has_humongous_reclaim_candidates(false),
   _archive_allocator(NULL),
-  _free_regions_coming(false),
-  _gc_time_stamp(0),
   _summary_bytes_used(0),
   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
   _old_evac_stats("Old", OldPLABSize, PLABWeight),
@@ -1896,41 +1828,6 @@
   return _hrm.total_free_bytes();
 }
 
-void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
-  hr->reset_gc_time_stamp();
-}
-
-#ifndef PRODUCT
-
-class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
-private:
-  unsigned _gc_time_stamp;
-  bool _failures;
-
-public:
-  CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
-    _gc_time_stamp(gc_time_stamp), _failures(false) { }
-
-  virtual bool do_heap_region(HeapRegion* hr) {
-    unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
-    if (_gc_time_stamp != region_gc_time_stamp) {
-      log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
-                            region_gc_time_stamp, _gc_time_stamp);
-      _failures = true;
-    }
-    return false;
-  }
-
-  bool failures() { return _failures; }
-};
-
-void G1CollectedHeap::check_gc_time_stamps() {
-  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
-  heap_region_iterate(&cl);
-  guarantee(!cl.failures(), "all GC time stamps should have been reset");
-}
-#endif // PRODUCT
-
 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
   _hot_card_cache->drain(cl, worker_i);
 }
@@ -2351,7 +2248,7 @@
 void G1CollectedHeap::print_regions_on(outputStream* st) const {
   st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
                "HS=humongous(starts), HC=humongous(continues), "
-               "CS=collection set, F=free, A=archive, TS=gc time stamp, "
+               "CS=collection set, F=free, A=archive, "
                "TAMS=top-at-mark-start (previous, next)");
   PrintRegionClosure blk(st);
   heap_region_iterate(&blk);
@@ -2482,7 +2379,7 @@
 G1CollectedHeap* G1CollectedHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
-  assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
+  assert(heap->kind() == CollectedHeap::G1, "Invalid name");
   return (G1CollectedHeap*)heap;
 }
 
@@ -2497,9 +2394,6 @@
   increment_total_collections(full /* full gc */);
   if (full) {
     increment_old_marking_cycles_started();
-    reset_gc_time_stamp();
-  } else {
-    increment_gc_time_stamp();
   }
 
   // Fill TLAB's and such
@@ -2559,8 +2453,7 @@
   return result;
 }
 
-void
-G1CollectedHeap::doConcurrentMark() {
+void G1CollectedHeap::do_concurrent_mark() {
   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
   if (!_cmThread->in_progress()) {
     _cmThread->set_started();
@@ -2581,6 +2474,16 @@
   return buffer_size * buffer_num + extra_cards;
 }
 
+bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
+  // We don't nominate objects with many remembered set entries, on
+  // the assumption that such objects are likely still live.
+  HeapRegionRemSet* rem_set = r->rem_set();
+
+  return G1EagerReclaimHumongousObjectsWithStaleRefs ?
+         rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
+         G1EagerReclaimHumongousObjects && rem_set->is_empty();
+}
+
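
The flag interplay in is_potential_eager_reclaim_candidate above, restated as a small decision function (a sketch; the parameter names mirror the hunk, the values are illustrative):

#include <cstddef>
#include <cstdio>

// Models the ternary above: with stale refs allowed, a few remembered set
// entries are tolerated; otherwise the remembered set must be empty.
bool is_candidate(bool eager_reclaim_with_stale_refs,
                  bool eager_reclaim,
                  size_t occupied, size_t sparse_entries,
                  bool remset_empty) {
  return eager_reclaim_with_stale_refs
         ? occupied <= sparse_entries
         : eager_reclaim && remset_empty;
}

int main() {
  printf("%d\n", is_candidate(true,  false, 3, 4, false));  // 1: few entries ok
  printf("%d\n", is_candidate(false, true,  3, 4, false));  // 0: must be empty
  printf("%d\n", is_candidate(false, true,  0, 4, true));   // 1: empty remset
  return 0;
}
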
 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
  private:
   size_t _total_humongous;
@@ -2588,26 +2491,22 @@
 
   DirtyCardQueue _dcq;
 
-  // We don't nominate objects with many remembered set entries, on
-  // the assumption that such objects are likely still live.
-  bool is_remset_small(HeapRegion* region) const {
-    HeapRegionRemSet* const rset = region->rem_set();
-    return G1EagerReclaimHumongousObjectsWithStaleRefs
-      ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
-      : rset->is_empty();
-  }
-
-  bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
+  bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
     assert(region->is_starts_humongous(), "Must start a humongous object");
 
     oop obj = oop(region->bottom());
 
     // Dead objects cannot be eager reclaim candidates. Due to class
     // unloading it is unsafe to query their classes so we return early.
-    if (heap->is_obj_dead(obj, region)) {
+    if (g1h->is_obj_dead(obj, region)) {
       return false;
     }
 
+    // If we do not have a complete remembered set for the region, then we
+    // cannot be sure that we have all references to it.
+    if (!region->rem_set()->is_complete()) {
+      return false;
+    }
     // Candidate selection must satisfy the following constraints
     // while concurrent marking is in progress:
     //
@@ -2644,7 +2543,8 @@
     // important use case for eager reclaim, and this special handling
     // may reduce needed headroom.
 
-    return obj->is_typeArray() && is_remset_small(region);
+    return obj->is_typeArray() &&
+           g1h->is_potential_eager_reclaim_candidate(region);
   }
 
  public:
@@ -2692,7 +2592,15 @@
         assert(hrrs.n_yielded() == r->rem_set()->occupied(),
                "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
                hrrs.n_yielded(), r->rem_set()->occupied());
-        r->rem_set()->clear_locked();
+        // We should only clear the card-based remembered set here, as we will not
+        // implicitly rebuild anything else during eager reclaim. Note that at the moment
+        // (and probably never) we do not enter this path if there are other kinds of
+        // remembered sets for this region.
+        r->rem_set()->clear_locked(true /* only_cardset */);
+        // The clear_locked() call above sets the state to Empty. However we
+        // want to continue collecting remembered set entries for humongous
+        // regions that were not reclaimed.
+        r->rem_set()->set_state_complete();
       }
       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
     }
@@ -2846,28 +2754,28 @@
   // We should not be doing initial mark unless the conc mark thread is running
   if (!_cmThread->should_terminate()) {
     // This call will decide whether this pause is an initial-mark
-    // pause. If it is, during_initial_mark_pause() will return true
+    // pause. If it is, in_initial_mark_gc() will return true
     // for the duration of this pause.
     g1_policy()->decide_on_conc_mark_initiation();
   }
 
   // We do not allow initial-mark to be piggy-backed on a mixed GC.
-  assert(!collector_state()->during_initial_mark_pause() ||
-          collector_state()->gcs_are_young(), "sanity");
+  assert(!collector_state()->in_initial_mark_gc() ||
+          collector_state()->in_young_only_phase(), "sanity");
 
   // We also do not allow mixed GCs during marking.
-  assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
+  assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
 
   // Record whether this pause is an initial mark. When the current
   // thread has completed its logging output and it's safe to signal
   // the CM thread, the flag's value in the policy has been reset.
-  bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
+  bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
 
   // Inner scope for scope based logging, timers, and stats collection
   {
     EvacuationInfo evacuation_info;
 
-    if (collector_state()->during_initial_mark_pause()) {
+    if (collector_state()->in_initial_mark_gc()) {
       // We are about to start a marking cycle, so we increment the
       // full collection counter.
       increment_old_marking_cycles_started();
@@ -2880,10 +2788,10 @@
 
     G1HeapVerifier::G1VerifyType verify_type;
     FormatBuffer<> gc_string("Pause ");
-    if (collector_state()->during_initial_mark_pause()) {
+    if (collector_state()->in_initial_mark_gc()) {
       gc_string.append("Initial Mark");
       verify_type = G1HeapVerifier::G1VerifyInitialMark;
-    } else if (collector_state()->gcs_are_young()) {
+    } else if (collector_state()->in_young_only_phase()) {
       gc_string.append("Young");
       verify_type = G1HeapVerifier::G1VerifyYoungOnly;
     } else {
@@ -2895,22 +2803,12 @@
     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                                                   workers()->active_workers(),
                                                                   Threads::number_of_non_daemon_threads());
-    workers()->update_active_workers(active_workers);
+    active_workers = workers()->update_active_workers(active_workers);
     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
 
     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
     TraceMemoryManagerStats tms(&_memory_manager, gc_cause());
 
-    // If the secondary_free_list is not empty, append it to the
-    // free_list. No need to wait for the cleanup operation to finish;
-    // the region allocation code will check the secondary_free_list
-    // and wait if necessary. If the G1StressConcRegionFreeing flag is
-    // set, skip this step so that the region allocation code has to
-    // get entries from the secondary_free_list.
-    if (!G1StressConcRegionFreeing) {
-      append_secondary_free_list_if_not_empty_with_lock();
-    }
-
     G1HeapTransition heap_transition(this);
     size_t heap_used_bytes_before_gc = used();
 
@@ -2971,8 +2869,8 @@
 
         g1_policy()->record_collection_pause_start(sample_start_time_sec);
 
-        if (collector_state()->during_initial_mark_pause()) {
-          concurrent_mark()->checkpoint_roots_initial_pre();
+        if (collector_state()->in_initial_mark_gc()) {
+          concurrent_mark()->pre_initial_mark();
         }
 
         g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
@@ -3039,12 +2937,11 @@
           increase_used(g1_policy()->bytes_copied_during_gc());
         }
 
-        if (collector_state()->during_initial_mark_pause()) {
+        if (collector_state()->in_initial_mark_gc()) {
           // We have to do this before we notify the CM threads that
           // they can start working to make sure that all the
           // appropriate initialization is done on the CM object.
-          concurrent_mark()->checkpoint_roots_initial_post();
-          collector_state()->set_mark_in_progress(true);
+          concurrent_mark()->post_initial_mark();
           // Note that we don't actually trigger the CM thread at
           // this point. We do that later when we're sure that
           // the current thread has completed its logging output.
@@ -3151,7 +3048,7 @@
     // running. Note: of course, the actual marking work will
     // not start until the safepoint itself is released in
     // SuspendibleThreadSet::desynchronize().
-    doConcurrentMark();
+    do_concurrent_mark();
   }
 
   return true;
@@ -3810,7 +3707,7 @@
   virtual void do_oop(      oop* p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
 
     if (_g1h->is_in_cset_or_humongous(obj)) {
       // If the referent object has been forwarded (either copied
@@ -4207,10 +4104,11 @@
 
   // If during an initial mark pause we install a pending list head which is not otherwise reachable
   // ensure that it is marked in the bitmap for concurrent marking to discover.
-  if (collector_state()->during_initial_mark_pause()) {
+  if (collector_state()->in_initial_mark_gc()) {
     oop pll_head = Universe::reference_pending_list();
     if (pll_head != NULL) {
-      _cm->mark_in_next_bitmap(pll_head);
+      // Any valid worker id is fine here as we are in the VM thread and single-threaded.
+      _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
     }
   }
 
@@ -4243,7 +4141,7 @@
   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
 
   // InitialMark needs claim bits to keep track of the marked-through CLDs.
-  if (collector_state()->during_initial_mark_pause()) {
+  if (collector_state()->in_initial_mark_gc()) {
     double start_clear_claimed_marks = os::elapsedTime();
 
     ClassLoaderDataGraph::clear_claimed_marks();
@@ -4399,16 +4297,16 @@
     _hot_card_cache->reset_card_counts(hr);
   }
   hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
+  _g1_policy->remset_tracker()->update_at_free(hr);
   free_list->add_ordered(hr);
 }
 
 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
-                                            FreeRegionList* free_list,
-                                            bool skip_remset) {
+                                            FreeRegionList* free_list) {
   assert(hr->is_humongous(), "this is only for humongous regions");
   assert(free_list != NULL, "pre-condition");
   hr->clear_humongous();
-  free_region(hr, free_list, skip_remset);
+  free_region(hr, free_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
 }
 
 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
@@ -4433,29 +4331,6 @@
   decrease_used(bytes);
 }
 
-class G1ParScrubRemSetTask: public AbstractGangTask {
-protected:
-  G1RemSet* _g1rs;
-  HeapRegionClaimer _hrclaimer;
-
-public:
-  G1ParScrubRemSetTask(G1RemSet* g1_rs, uint num_workers) :
-    AbstractGangTask("G1 ScrubRS"),
-    _g1rs(g1_rs),
-    _hrclaimer(num_workers) {
-  }
-
-  void work(uint worker_id) {
-    _g1rs->scrub(worker_id, &_hrclaimer);
-  }
-};
-
-void G1CollectedHeap::scrub_rem_set() {
-  uint num_workers = workers()->active_workers();
-  G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
-  workers()->run_task(&g1_par_scrub_rs_task);
-}
-
 class G1FreeCollectionSetTask : public AbstractGangTask {
 private:
 
@@ -4816,17 +4691,14 @@
                              obj->is_typeArray()
                             );
 
-    // Need to clear mark bit of the humongous object if already set.
-    if (next_bitmap->is_marked(r->bottom())) {
-      next_bitmap->clear(r->bottom());
-    }
+    g1h->concurrent_mark()->humongous_object_eagerly_reclaimed(r);
     _humongous_objects_reclaimed++;
     do {
       HeapRegion* next = g1h->next_region_in_humongous(r);
       _freed_bytes += r->used();
       r->set_containing_set(NULL);
       _humongous_regions_reclaimed++;
-      g1h->free_humongous_region(r, _free_region_list, false /* skip_remset */ );
+      g1h->free_humongous_region(r, _free_region_list);
       r = next;
     } while (r != NULL);
 
@@ -4898,44 +4770,6 @@
   collection_set->stop_incremental_building();
 }
 
-void G1CollectedHeap::set_free_regions_coming() {
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");
-
-  assert(!free_regions_coming(), "pre-condition");
-  _free_regions_coming = true;
-}
-
-void G1CollectedHeap::reset_free_regions_coming() {
-  assert(free_regions_coming(), "pre-condition");
-
-  {
-    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-    _free_regions_coming = false;
-    SecondaryFreeList_lock->notify_all();
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");
-}
-
-void G1CollectedHeap::wait_while_free_regions_coming() {
-  // Most of the time we won't have to wait, so let's do a quick test
-  // first before we take the lock.
-  if (!free_regions_coming()) {
-    return;
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : waiting for free regions");
-
-  {
-    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-    while (free_regions_coming()) {
-      SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
-    }
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");
-}
-
 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
   return _allocator->is_retained_old_region(hr);
 }
@@ -5051,6 +4885,8 @@
   }
 
   bool do_heap_region(HeapRegion* r) {
+    // After full GC, no region should have a remembered set.
+    r->rem_set()->clear(true);
     if (r->is_empty()) {
       // Add free regions to the free list
       r->set_free();
@@ -5118,6 +4954,7 @@
       set_region_short_lived_locked(new_alloc_region);
       _hr_printer.alloc(new_alloc_region, !should_allocate);
       _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
+      _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
       return new_alloc_region;
     }
   }
@@ -5161,10 +4998,6 @@
                                             !is_survivor,
                                             true /* do_expand */);
   if (new_alloc_region != NULL) {
-    // We really only need to do this for old regions given that we
-    // should never scan survivors. But it doesn't hurt to do it
-    // for survivors too.
-    new_alloc_region->record_timestamp();
     if (is_survivor) {
       new_alloc_region->set_survivor();
       _survivor.add(new_alloc_region);
@@ -5173,8 +5006,9 @@
       new_alloc_region->set_old();
       _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
     }
+    _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
     _hr_printer.alloc(new_alloc_region);
-    bool during_im = collector_state()->during_initial_mark_pause();
+    bool during_im = collector_state()->in_initial_mark_gc();
     new_alloc_region->note_start_of_copying(during_im);
     return new_alloc_region;
   }
@@ -5184,7 +5018,7 @@
 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                              size_t allocated_bytes,
                                              InCSetState dest) {
-  bool during_im = collector_state()->during_initial_mark_pause();
+  bool during_im = collector_state()->in_initial_mark_gc();
   alloc_region->note_end_of_copying(during_im);
   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
   if (dest.is_old()) {
@@ -5215,9 +5049,9 @@
   nmethod* _nm;
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       HeapRegion* hr = _g1h->heap_region_containing(obj);
       assert(!hr->is_continues_humongous(),
              "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
@@ -5242,9 +5076,9 @@
   nmethod* _nm;
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       HeapRegion* hr = _g1h->heap_region_containing(obj);
       assert(!hr->is_continues_humongous(),
              "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -79,7 +79,7 @@
 class G1YoungRemSetSamplingThread;
 class HeapRegionRemSetIterator;
 class G1ConcurrentMark;
-class ConcurrentMarkThread;
+class G1ConcurrentMarkThread;
 class G1ConcurrentRefine;
 class GenerationCounters;
 class STWGCTimer;
@@ -163,11 +163,6 @@
 
   static size_t _humongous_object_threshold_in_words;
 
-  // The secondary free list which contains regions that have been
-  // freed up during the cleanup process. This will be appended to
-  // the master free list when appropriate.
-  FreeRegionList _secondary_free_list;
-
   // It keeps track of the old regions.
   HeapRegionSet _old_set;
 
@@ -267,8 +262,6 @@
   // If not, we can skip a few steps.
   bool _has_humongous_reclaim_candidates;
 
-  volatile uint _gc_time_stamp;
-
   G1HRPrinter _hr_printer;
 
   // It decides whether an explicit GC should start a concurrent cycle
@@ -380,13 +373,6 @@
 
   G1CollectionSet _collection_set;
 
-  // This is the second level of trying to allocate a new region. If
-  // new_region() didn't find a region on the free_list, this call will
-  // check whether there's anything available on the
-  // secondary_free_list and/or wait for more regions to appear on
-  // that list, if _free_regions_coming is set.
-  HeapRegion* new_region_try_secondary_free_list(bool is_old);
-
   // Try to allocate a single non-humongous HeapRegion sufficient for
   // an allocation of the given word_size. If do_expand is true,
   // attempt to expand the heap if necessary to satisfy the allocation
@@ -564,6 +550,9 @@
   void gc_prologue(bool full);
   void gc_epilogue(bool full);
 
+  // Does the given region fulfill remembered-set-based eager reclaim candidate requirements?

+  bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
+
   // Modify the reclaim candidate set and test for presence.
   // These are only valid for starts_humongous regions.
   inline void set_humongous_reclaim_candidate(uint region, bool value);
@@ -654,12 +643,11 @@
   // and calling free_region() for each of them. The freed regions
   // will be added to the free list that's passed as a parameter (this
   // is usually a local list which will be appended to the master free
-  // list later). The used bytes of freed regions are accumulated in
-  // pre_used. If skip_remset is true, the region's RSet will not be freed
-  // up. The assumption is that this will be done later.
+  // list later).
+  // The method assumes that only a single thread ever calls this
+  // for a particular region at a time.
   void free_humongous_region(HeapRegion* hr,
-                             FreeRegionList* free_list,
-                             bool skip_remset);
+                             FreeRegionList* free_list);
 
   // Facility for allocating in 'archive' regions in high heap memory and
   // recording the allocated ranges. These should all be called from the
@@ -778,7 +766,7 @@
 
   // The concurrent marker (and the thread it runs in.)
   G1ConcurrentMark* _cm;
-  ConcurrentMarkThread* _cmThread;
+  G1ConcurrentMarkThread* _cmThread;
 
   // The concurrent refiner.
   G1ConcurrentRefine* _cr;
@@ -824,9 +812,9 @@
   // Set whether G1EvacuationFailureALot should be in effect
   // for the current GC (based upon the type of GC and which
   // command line flags are set);
-  inline bool evacuation_failure_alot_for_gc_type(bool gcs_are_young,
+  inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                   bool during_initial_mark,
-                                                  bool during_marking);
+                                                  bool mark_or_rebuild_in_progress);
 
   inline void set_evacuation_failure_alot_for_current_gc();
 
@@ -916,8 +904,6 @@
   // discovery.
   G1CMIsAliveClosure _is_alive_closure_cm;
 
-  volatile bool _free_regions_coming;
-
 public:
 
   RefToScanQueue *task_queue(uint i) const;
@@ -955,7 +941,7 @@
   void ref_processing_init();
 
   virtual Name kind() const {
-    return CollectedHeap::G1CollectedHeap;
+    return CollectedHeap::G1;
   }
 
   virtual const char* name() const {
@@ -984,21 +970,6 @@
   // Try to minimize the remembered set.
   void scrub_rem_set();
 
-  uint get_gc_time_stamp() {
-    return _gc_time_stamp;
-  }
-
-  inline void reset_gc_time_stamp();
-
-  void check_gc_time_stamps() PRODUCT_RETURN;
-
-  inline void increment_gc_time_stamp();
-
-  // Reset the given region's GC timestamp. If it's starts humongous,
-  // also reset the GC timestamp of its corresponding
-  // continues humongous regions too.
-  void reset_gc_time_stamps(HeapRegion* hr);
-
   // Apply the given closure on all cards in the Hot Card Cache, emptying it.
   void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);
 
@@ -1063,26 +1034,6 @@
   }
 #endif // ASSERT
 
-  // Wrapper for the region list operations that can be called from
-  // methods outside this class.
-
-  void secondary_free_list_add(FreeRegionList* list) {
-    _secondary_free_list.add_ordered(list);
-  }
-
-  void append_secondary_free_list() {
-    _hrm.insert_list_into_free_list(&_secondary_free_list);
-  }
-
-  void append_secondary_free_list_if_not_empty_with_lock() {
-    // If the secondary free list looks empty there's no reason to
-    // take the lock and then try to append it.
-    if (!_secondary_free_list.is_empty()) {
-      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-      append_secondary_free_list();
-    }
-  }
-
   inline void old_set_add(HeapRegion* hr);
   inline void old_set_remove(HeapRegion* hr);
 
@@ -1090,11 +1041,6 @@
     return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes;
   }
 
-  void set_free_regions_coming();
-  void reset_free_regions_coming();
-  bool free_regions_coming() { return _free_regions_coming; }
-  void wait_while_free_regions_coming();
-
   // Determine whether the given region is one that we are using as an
   // old GC alloc region.
   bool is_old_gc_alloc_region(HeapRegion* hr);
@@ -1305,7 +1251,7 @@
   // functions.
   // This performs a concurrent marking of the live objects in a
   // bitmap off to the side.
-  void doConcurrentMark();
+  void do_concurrent_mark();
 
   bool isMarkedNext(oop obj) const;
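For orientation, the remset_tracker() hooks wired in by this changeset fire at fixed points of a region's lifecycle; a sketch of the call protocol (all four method names appear in hunks of this changeset; the policy internals are not part of this file):

    G1RemSetTrackingPolicy* tracker = _g1_policy->remset_tracker();
    tracker->update_at_allocate(hr);       // region handed out as an alloc region
    tracker->update_at_free(hr);           // region returned to a free list
    // At remark and cleanup (see g1ConcurrentMark.cpp below):
    //   tracker->update_before_rebuild(hr, live_bytes);   // select for rebuild
    //   tracker->update_after_rebuild(hr);                 // mark remset complete
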
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -84,16 +84,6 @@
   return _hrm.addr_to_region((HeapWord*) addr);
 }
 
-inline void G1CollectedHeap::reset_gc_time_stamp() {
-  assert_at_safepoint_on_vm_thread();
-  _gc_time_stamp = 0;
-}
-
-inline void G1CollectedHeap::increment_gc_time_stamp() {
-  assert_at_safepoint_on_vm_thread();
-  ++_gc_time_stamp;
-}
-
 inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
   _old_set.add(hr);
 }
@@ -162,17 +152,17 @@
 // Support for G1EvacuationFailureALot
 
 inline bool
-G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
+G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                      bool during_initial_mark,
-                                                     bool during_marking) {
+                                                     bool mark_or_rebuild_in_progress) {
   bool res = false;
-  if (during_marking) {
+  if (mark_or_rebuild_in_progress) {
     res |= G1EvacuationFailureALotDuringConcMark;
   }
   if (during_initial_mark) {
     res |= G1EvacuationFailureALotDuringInitialMark;
   }
-  if (gcs_are_young) {
+  if (for_young_gc) {
     res |= G1EvacuationFailureALotDuringYoungGC;
   } else {
     // GCs are mixed
@@ -196,14 +186,14 @@
     _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);
 
     // Now check if G1EvacuationFailureALot is enabled for the current GC type.
-    const bool gcs_are_young = collector_state()->gcs_are_young();
-    const bool during_im = collector_state()->during_initial_mark_pause();
-    const bool during_marking = collector_state()->mark_in_progress();
+    const bool in_young_only_phase = collector_state()->in_young_only_phase();
+    const bool in_initial_mark_gc = collector_state()->in_initial_mark_gc();
+    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();
 
     _evacuation_failure_alot_for_current_gc &=
-      evacuation_failure_alot_for_gc_type(gcs_are_young,
-                                          during_im,
-                                          during_marking);
+      evacuation_failure_alot_for_gc_type(in_young_only_phase,
+                                          in_initial_mark_gc,
+                                          mark_or_rebuild_in_progress);
   }
 }
 
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -47,7 +47,7 @@
 }
 
 double G1CollectionSet::predict_region_elapsed_time_ms(HeapRegion* hr) {
-  return _policy->predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
+  return _policy->predict_region_elapsed_time_ms(hr, collector_state()->in_young_only_phase());
 }
 
 G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
@@ -255,21 +255,23 @@
   // are calculated, aggregated with the policy collection set info,
   // and cached in the heap region here (initially) and (subsequently)
   // by the Young List sampling code.
+  // Ignore calls to this due to retirement during full gc.
 
-  size_t rs_length = hr->rem_set()->occupied();
-  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
+  if (!G1CollectedHeap::heap()->collector_state()->in_full_gc()) {
+    size_t rs_length = hr->rem_set()->occupied();
+    double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
 
-  // Cache the values we have added to the aggregated information
-  // in the heap region in case we have to remove this region from
-  // the incremental collection set, or it is updated by the
-  // rset sampling code
-  hr->set_recorded_rs_length(rs_length);
-  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
+    // Cache the values we have added to the aggregated information
+    // in the heap region in case we have to remove this region from
+    // the incremental collection set, or it is updated by the
+    // rset sampling code
+    hr->set_recorded_rs_length(rs_length);
+    hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
 
-  size_t used_bytes = hr->used();
-  _inc_recorded_rs_lengths += rs_length;
-  _inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
-  _inc_bytes_used_before += used_bytes;
+    _inc_recorded_rs_lengths += rs_length;
+    _inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
+    _inc_bytes_used_before += hr->used();
+  }
 
   assert(!hr->in_collection_set(), "invariant");
   _g1->register_young_region_with_cset(hr);
@@ -366,8 +368,6 @@
   log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
                             pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
 
-  collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
-
   // The young list is laid out with the survivor regions from the previous
   // pause appended to the RHS of the young list, i.e.
   //   [Newly Young Regions ++ Survivors from last pause].
@@ -411,7 +411,7 @@
   double non_young_start_time_sec = os::elapsedTime();
   double predicted_old_time_ms = 0.0;
 
-  if (!collector_state()->gcs_are_young()) {
+  if (collector_state()->in_mixed_phase()) {
     cset_chooser()->verify();
     const uint min_old_cset_length = _policy->calc_min_old_cset_length();
     const uint max_old_cset_length = _policy->calc_max_old_cset_length();
--- a/src/hotspot/share/gc/g1/g1CollectorState.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1CollectorState.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,18 +28,17 @@
 #include "gc/g1/g1YCTypes.hpp"
 #include "utilities/globalDefinitions.hpp"
 
-// Various state variables that indicate
-// the phase of the G1 collection.
+// State of the G1 collection.
 class G1CollectorState {
-  // Indicates whether we are in "full young" or "mixed" GC mode.
-  bool _gcs_are_young;
-  // Was the last GC "young"?
-  bool _last_gc_was_young;
-  // Is this the "last young GC" before we start doing mixed GCs?
-  // Set after a concurrent mark has completed.
-  bool _last_young_gc;
+  // Indicates whether we are in the phase where we do partial gcs that only contain
+  // the young generation. Not set while _in_full_gc is set.
+  bool _in_young_only_phase;
 
-  // If initiate_conc_mark_if_possible() is set at the beginning of a
+  // Indicates whether we are in the last young gc before the mixed gc phase. This GC
+  // is required to maintain the pause time requirements.
+  bool _in_young_gc_before_mixed;
+
+  // If _initiate_conc_mark_if_possible is set at the beginning of a
   // pause, it is a suggestion that the pause should start a marking
   // cycle by doing the initial-mark work. However, it is possible
   // that the concurrent marking thread is still finishing up the
@@ -48,81 +47,75 @@
   // we'll have to wait for the concurrent marking thread to finish
   // what it is doing. In this case we will postpone the marking cycle
   // initiation decision for the next pause. When we eventually decide
-  // to start a cycle, we will set _during_initial_mark_pause which
-  // will stay true until the end of the initial-mark pause and it's
-  // the condition that indicates that a pause is doing the
+  // to start a cycle, we will set _in_initial_mark_gc which
+  // will stay true until the end of the initial-mark pause that does the
   // initial-mark work.
-  volatile bool _during_initial_mark_pause;
+  volatile bool _in_initial_mark_gc;
 
   // At the end of a pause we check the heap occupancy and we decide
   // whether we will start a marking cycle during the next pause. If
-  // we decide that we want to do that, we will set this parameter to
-  // true. So, this parameter will stay true between the end of a
-  // pause and the beginning of a subsequent pause (not necessarily
-  // the next one, see the comments on the next field) when we decide
-  // that we will indeed start a marking cycle and do the initial-mark
-  // work.
+  // we decide that we want to do that, this parameter is set. It will
+  // stay set until the beginning of a subsequent pause (not necessarily
+  // the next one) when we decide that we will indeed start a marking cycle and
+  // do the initial-mark work.
   volatile bool _initiate_conc_mark_if_possible;
 
-  // NOTE: if some of these are synonyms for others,
-  // the redundant fields should be eliminated. XXX
-  bool _during_marking;
-  bool _mark_in_progress;
-  bool _in_marking_window;
-  bool _in_marking_window_im;
+  // Marking or rebuilding remembered set work is in progress. Set from the end
+  // of the initial mark pause to the end of the Cleanup pause.
+  bool _mark_or_rebuild_in_progress;
 
-  bool _full_collection;
+  // The next bitmap is currently being cleared or about to be cleared. TAMS and bitmap
+  // may be out of sync.
+  bool _clearing_next_bitmap;
+
+  // Set during a full gc pause.
+  bool _in_full_gc;
 
-  public:
-    G1CollectorState() :
-      _gcs_are_young(true),
-      _last_gc_was_young(false),
-      _last_young_gc(false),
+public:
+  G1CollectorState() :
+    _in_young_only_phase(true),
+    _in_young_gc_before_mixed(false),
 
-      _during_initial_mark_pause(false),
-      _initiate_conc_mark_if_possible(false),
+    _in_initial_mark_gc(false),
+    _initiate_conc_mark_if_possible(false),
 
-      _during_marking(false),
-      _mark_in_progress(false),
-      _in_marking_window(false),
-      _in_marking_window_im(false),
-      _full_collection(false) {}
+    _mark_or_rebuild_in_progress(false),
+    _clearing_next_bitmap(false),
+    _in_full_gc(false) { }
 
-  // Setters
-  void set_gcs_are_young(bool v) { _gcs_are_young = v; }
-  void set_last_gc_was_young(bool v) { _last_gc_was_young = v; }
-  void set_last_young_gc(bool v) { _last_young_gc = v; }
-  void set_during_initial_mark_pause(bool v) { _during_initial_mark_pause = v; }
+  // Phase setters
+  void set_in_young_only_phase(bool v) { _in_young_only_phase = v; }
+
+  // Pause setters
+  void set_in_young_gc_before_mixed(bool v) { _in_young_gc_before_mixed = v; }
+  void set_in_initial_mark_gc(bool v) { _in_initial_mark_gc = v; }
+  void set_in_full_gc(bool v) { _in_full_gc = v; }
+
   void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
-  void set_during_marking(bool v) { _during_marking = v; }
-  void set_mark_in_progress(bool v) { _mark_in_progress = v; }
-  void set_in_marking_window(bool v) { _in_marking_window = v; }
-  void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }
-  void set_full_collection(bool v) { _full_collection = v; }
+
+  void set_mark_or_rebuild_in_progress(bool v) { _mark_or_rebuild_in_progress = v; }
+  void set_clearing_next_bitmap(bool v) { _clearing_next_bitmap = v; }
 
-  // Getters
-  bool gcs_are_young() const { return _gcs_are_young; }
-  bool last_gc_was_young() const { return _last_gc_was_young; }
-  bool last_young_gc() const { return _last_young_gc; }
-  bool during_initial_mark_pause() const { return _during_initial_mark_pause; }
+  // Phase getters
+  bool in_young_only_phase() const { return _in_young_only_phase && !_in_full_gc; }
+  bool in_mixed_phase() const { return !in_young_only_phase() && !_in_full_gc; }
+
+  // Specific pauses
+  bool in_young_gc_before_mixed() const { return _in_young_gc_before_mixed; }
+  bool in_full_gc() const { return _in_full_gc; }
+  bool in_initial_mark_gc() const { return _in_initial_mark_gc; }
+
   bool initiate_conc_mark_if_possible() const { return _initiate_conc_mark_if_possible; }
-  bool during_marking() const { return _during_marking; }
-  bool mark_in_progress() const { return _mark_in_progress; }
-  bool in_marking_window() const { return _in_marking_window; }
-  bool in_marking_window_im() const { return _in_marking_window_im; }
-  bool full_collection() const { return _full_collection; }
 
-  // Composite booleans (clients worry about flickering)
-  bool during_concurrent_mark() const {
-    return (_in_marking_window && !_in_marking_window_im);
-  }
+  bool mark_or_rebuild_in_progress() const { return _mark_or_rebuild_in_progress; }
+  bool clearing_next_bitmap() const { return _clearing_next_bitmap; }
 
   G1YCType yc_type() const {
-    if (during_initial_mark_pause()) {
+    if (in_initial_mark_gc()) {
       return InitialMark;
-    } else if (mark_in_progress()) {
-      return DuringMark;
-    } else if (gcs_are_young()) {
+    } else if (mark_or_rebuild_in_progress()) {
+      return DuringMarkOrRebuild;
+    } else if (in_young_only_phase()) {
       return Normal;
     } else {
       return Mixed;
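The new predicates replace the old boolean soup with mutually exclusive phases; a minimal sketch of how a pause could be classified with the renamed getters (heap accessor as used elsewhere in this changeset):

    G1CollectorState* state = G1CollectedHeap::heap()->collector_state();
    if (state->in_full_gc()) {
      // serial full collection; neither phase predicate applies
    } else if (state->in_initial_mark_gc()) {
      // young pause that also starts a concurrent marking cycle
    } else if (state->in_young_only_phase()) {
      // regular young-only pause, possibly the last one before mixed
      // (see in_young_gc_before_mixed())
    } else {
      assert(state->in_mixed_phase(), "only remaining possibility");
      // mixed pause: young regions plus old collection set candidates
    }
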
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,14 +26,14 @@
 #include "classfile/metadataOnStackMark.hpp"
 #include "classfile/symbolTable.hpp"
 #include "code/codeCache.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
-#include "gc/g1/g1CardLiveData.inline.hpp"
 #include "gc/g1/g1Policy.hpp"
+#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
@@ -50,9 +50,11 @@
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 #include "gc/shared/weakProcessor.hpp"
+#include "include/jvm.h"
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
@@ -253,7 +255,7 @@
 }
 
 G1CMRootRegions::G1CMRootRegions() :
-  _cm(NULL), _scan_in_progress(false),
+  _survivors(NULL), _cm(NULL), _scan_in_progress(false),
   _should_abort(false), _claimed_survivor_index(0) { }
 
 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
@@ -316,7 +318,9 @@
 }
 
 bool G1CMRootRegions::wait_until_scan_finished() {
-  if (!scan_in_progress()) return false;
+  if (!scan_in_progress()) {
+    return false;
+  }
 
   {
     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
@@ -341,14 +345,12 @@
   _g1h(g1h),
   _completed_initialization(false),
 
-  _cleanup_list("Concurrent Mark Cleanup List"),
   _mark_bitmap_1(),
   _mark_bitmap_2(),
   _prev_mark_bitmap(&_mark_bitmap_1),
   _next_mark_bitmap(&_mark_bitmap_2),
 
-  _heap_start(_g1h->reserved_region().start()),
-  _heap_end(_g1h->reserved_region().end()),
+  _heap(_g1h->reserved_region()),
 
   _root_regions(),
 
@@ -356,6 +358,7 @@
 
   // _finger set in set_non_marking_state
 
+  _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
   _max_num_tasks(ParallelGCThreads),
   // _num_active_tasks set in set_non_marking_state()
   // _tasks set inside the constructor
@@ -370,7 +373,6 @@
   _concurrent(false),
   _has_aborted(false),
   _restart_for_overflow(false),
-  _concurrent_marking_in_progress(false),
   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 
@@ -381,20 +383,22 @@
   _remark_mark_times(),
   _remark_weak_ref_times(),
   _cleanup_times(),
-  _total_counting_time(0.0),
-  _total_rs_scrub_time(0.0),
+  _total_cleanup_time(0.0),
 
   _accum_task_vtime(NULL),
 
   _concurrent_workers(NULL),
   _num_concurrent_workers(0),
-  _max_concurrent_workers(0)
+  _max_concurrent_workers(0),
+
+  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
+  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 {
   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 
   // Create & start ConcurrentMark thread.
-  _cm_thread = new ConcurrentMarkThread(this);
+  _cm_thread = new G1ConcurrentMarkThread(this);
   if (_cm_thread->osthread() == NULL) {
     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
   }
@@ -420,7 +424,7 @@
     return;
   }
 
-  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
+  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 
   _num_concurrent_workers = ConcGCThreads;
@@ -478,53 +482,85 @@
     task_queue->initialize();
     _task_queues->register_queue(i, task_queue);
 
-    _tasks[i] = new G1CMTask(i, this, task_queue);
+    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
 
     _accum_task_vtime[i] = 0.0;
   }
 
-  set_non_marking_state();
+  reset_at_marking_complete();
   _completed_initialization = true;
 }
 
 void G1ConcurrentMark::reset() {
-  // Starting values for these two. This should be called in a STW
-  // phase.
-  MemRegion reserved = _g1h->g1_reserved();
-  _heap_start = reserved.start();
-  _heap_end   = reserved.end();
-
-  // Separated the asserts so that we know which one fires.
-  assert(_heap_start != NULL, "heap bounds should look ok");
-  assert(_heap_end != NULL, "heap bounds should look ok");
-  assert(_heap_start < _heap_end, "heap bounds should look ok");
-
-  // Reset all the marking data structures and any necessary flags
-  reset_marking_state();
-
-  // We reset all of them, since different phases will use
-  // different number of active threads. So, it's easiest to have all
-  // of them ready.
+  _has_aborted = false;
+
+  reset_marking_for_restart();
+
+  // Reset all tasks, since different phases will use different number of active
+  // threads. So, it's easiest to have all of them ready.
   for (uint i = 0; i < _max_num_tasks; ++i) {
     _tasks[i]->reset(_next_mark_bitmap);
   }
 
-  // we need this to make sure that the flag is on during the evac
-  // pause with initial mark piggy-backed
-  set_concurrent_marking_in_progress();
+  uint max_regions = _g1h->max_regions();
+  for (uint i = 0; i < max_regions; i++) {
+    _top_at_rebuild_starts[i] = NULL;
+    _region_mark_stats[i].clear();
+  }
+}
+
+void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
+  for (uint j = 0; j < _max_num_tasks; ++j) {
+    _tasks[j]->clear_mark_stats_cache(region_idx);
+  }
+  _top_at_rebuild_starts[region_idx] = NULL;
+  _region_mark_stats[region_idx].clear();
 }
 
-
-void G1ConcurrentMark::reset_marking_state() {
+void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
+  uint const region_idx = r->hrm_index();
+  if (r->is_humongous()) {
+    assert(r->is_starts_humongous(), "Got a continues humongous region here");
+    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
+    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
+      clear_statistics_in_region(j);
+    }
+  } else {
+    clear_statistics_in_region(region_idx);
+  }
+}
+
+void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
+  assert_at_safepoint_on_vm_thread();
+
+  // Need to clear the mark bit of the humongous object.
+  if (_next_mark_bitmap->is_marked(r->bottom())) {
+    _next_mark_bitmap->clear(r->bottom());
+  }
+
+  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
+    return;
+  }
+
+  // Clear any statistics about the region gathered so far.
+  clear_statistics(r);
+}
+
+void G1ConcurrentMark::reset_marking_for_restart() {
   _global_mark_stack.set_empty();
 
   // Expand the marking stack, if we have to and if we can.
   if (has_overflown()) {
     _global_mark_stack.expand();
+
+    uint max_regions = _g1h->max_regions();
+    for (uint i = 0; i < max_regions; i++) {
+      _region_mark_stats[i].clear_during_overflow();
+    }
   }
 
   clear_has_overflown();
-  _finger = _heap_start;
+  _finger = _heap.start();
 
   for (uint i = 0; i < _max_num_tasks; ++i) {
     G1CMTaskQueue* queue = _task_queues->queue(i);
@@ -538,7 +574,7 @@
   _num_active_tasks = active_tasks;
   // Need to update the three data structures below according to the
   // number of active threads for this phase.
-  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
+  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 }
@@ -547,33 +583,26 @@
   set_concurrency(active_tasks);
 
   _concurrent = concurrent;
-  // We propagate this to all tasks, not just the active ones.
-  for (uint i = 0; i < _max_num_tasks; ++i) {
-    _tasks[i]->set_concurrent(concurrent);
-  }
-
-  if (concurrent) {
-    set_concurrent_marking_in_progress();
-  } else {
-    // We currently assume that the concurrent flag has been set to
-    // false before we start remark. At this point we should also be
-    // in a STW phase.
-    assert(!concurrent_marking_in_progress(), "invariant");
+
+  if (!concurrent) {
+    // At this point we should be in a STW phase, with marking completed.
+    assert_at_safepoint_on_vm_thread();
     assert(out_of_regions(),
            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
-           p2i(_finger), p2i(_heap_end));
+           p2i(_finger), p2i(_heap.end()));
   }
 }
 
-void G1ConcurrentMark::set_non_marking_state() {
+void G1ConcurrentMark::reset_at_marking_complete() {
   // We set the global marking state to some default values when we're
   // not doing marking.
-  reset_marking_state();
+  reset_marking_for_restart();
   _num_active_tasks = 0;
-  clear_concurrent_marking_in_progress();
 }
 
 G1ConcurrentMark::~G1ConcurrentMark() {
+  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
+  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
   // The G1ConcurrentMark instance is never freed.
   ShouldNotReachHere();
 }
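The two arrays freed here back the new per-region liveness accounting; an assumed shape of the statistics record (the real definition lives in g1RegionMarkStatsCache.hpp, included at the top of this file):

    // Assumed sketch of G1RegionMarkStats; only the pieces implied by the
    // calls in this file (clear(), clear_during_overflow(), liveness()).
    struct G1RegionMarkStats {
      size_t _live_words;                  // words marked live in the region
      void clear() { _live_words = 0; }
      void clear_during_overflow() { }     // restart-safe reset, body assumed
    };
    // At remark, live bytes per region derive directly from these counters:
    //   size_t live_bytes = _cm->liveness(region_index) * HeapWordSize;
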
@@ -613,7 +642,7 @@
         // will have them as guarantees at the beginning / end of the bitmap
         // clearing to get some checking in the product.
         assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
-        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
+        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
       }
       assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
 
@@ -667,30 +696,22 @@
   // marking bitmap and getting it ready for the next cycle. During
   // this time no other cycle can start. So, let's make sure that this
   // is the case.
-  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
+  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 
   clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 
-  // Clear the live count data. If the marking has been aborted, the abort()
-  // call already did that.
-  if (!has_aborted()) {
-    clear_live_data(_concurrent_workers);
-    DEBUG_ONLY(verify_live_data_clear());
-  }
-
   // Repeat the asserts from above.
   guarantee(cm_thread()->during_cycle(), "invariant");
-  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
+  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 }
 
 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
-  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
+  assert_at_safepoint_on_vm_thread();
   clear_bitmap(_prev_mark_bitmap, workers, false);
 }
 
 class CheckBitmapClearHRClosure : public HeapRegionClosure {
   G1CMBitMap* _bitmap;
-  bool _error;
  public:
   CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
   }
@@ -711,7 +732,7 @@
   return cl.is_complete();
 }
 
-class NoteStartOfMarkHRClosure: public HeapRegionClosure {
+class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 public:
   bool do_heap_region(HeapRegion* r) {
     r->note_start_of_marking();
@@ -719,25 +740,19 @@
   }
 };
 
-void G1ConcurrentMark::checkpoint_roots_initial_pre() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  _has_aborted = false;
-
+void G1ConcurrentMark::pre_initial_mark() {
   // Initialize marking structures. This has to be done in a STW phase.
   reset();
 
   // For each region note start of marking.
   NoteStartOfMarkHRClosure startcl;
-  g1h->heap_region_iterate(&startcl);
+  _g1h->heap_region_iterate(&startcl);
 }
 
 
-void G1ConcurrentMark::checkpoint_roots_initial_post() {
-  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
-
+void G1ConcurrentMark::post_initial_mark() {
   // Start Concurrent Marking weak-reference discovery.
-  ReferenceProcessor* rp = g1h->ref_processor_cm();
+  ReferenceProcessor* rp = _g1h->ref_processor_cm();
   // enable ("weak") refs discovery
   rp->enable_discovery();
   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
@@ -792,29 +807,6 @@
     // just abort the whole marking phase as quickly as possible.
     return;
   }
-
-  // If we're executing the concurrent phase of marking, reset the marking
-  // state; otherwise the marking state is reset after reference processing,
-  // during the remark pause.
-  // If we reset here as a result of an overflow during the remark we will
-  // see assertion failures from any subsequent set_concurrency_and_phase()
-  // calls.
-  if (concurrent()) {
-    // let the task associated with with worker 0 do this
-    if (worker_id == 0) {
-      // task 0 is responsible for clearing the global data structures
-      // We should be here because of an overflow. During STW we should
-      // not clear the overflow flag since we rely on it being true when
-      // we exit this method to abort the pause and restart concurrent
-      // marking.
-      reset_marking_state();
-
-      log_info(gc, marking)("Concurrent Mark reset for overflow");
-    }
-  }
-
-  // after this, each task should reset its own data structures then
-  // then go into the second barrier
 }
 
 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
@@ -824,10 +816,8 @@
   // at this point everything should be re-initialized and ready to go
 }
 
-class G1CMConcurrentMarkingTask: public AbstractGangTask {
-private:
+class G1CMConcurrentMarkingTask : public AbstractGangTask {
   G1ConcurrentMark*     _cm;
-  ConcurrentMarkThread* _cmt;
 
 public:
   void work(uint worker_id) {
@@ -860,9 +850,8 @@
     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
   }
 
-  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
-                            ConcurrentMarkThread* cmt) :
-      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
+  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
+      AbstractGangTask("Concurrent Mark"), _cm(cm) { }
 
   ~G1CMConcurrentMarkingTask() { }
 };
@@ -888,10 +877,10 @@
   return result;
 }
 
-void G1ConcurrentMark::scan_root_region(HeapRegion* hr) {
+void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
   // Currently, only survivors can be root regions.
   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
-  G1RootRegionScanClosure cl(_g1h, this);
+  G1RootRegionScanClosure cl(_g1h, this, worker_id);
 
   const uintx interval = PrefetchScanIntervalInBytes;
   HeapWord* curr = hr->bottom();
@@ -906,9 +895,7 @@
 }
 
 class G1CMRootRegionScanTask : public AbstractGangTask {
-private:
   G1ConcurrentMark* _cm;
-
 public:
   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
@@ -920,7 +907,7 @@
     G1CMRootRegions* root_regions = _cm->root_regions();
     HeapRegion* hr = root_regions->claim_next();
     while (hr != NULL) {
-      _cm->scan_root_region(hr);
+      _cm->scan_root_region(hr, worker_id);
       hr = root_regions->claim_next();
     }
   }
@@ -961,9 +948,12 @@
 }
 
 void G1ConcurrentMark::concurrent_cycle_end() {
+  _g1h->collector_state()->set_clearing_next_bitmap(false);
+
   _g1h->trace_heap_after_gc(_gc_tracer_cm);
 
   if (has_aborted()) {
+    log_info(gc, marking)("Concurrent Mark Abort");
     _gc_tracer_cm->report_concurrent_mode_failure();
   }
 
@@ -973,13 +963,6 @@
 }
 
 void G1ConcurrentMark::mark_from_roots() {
-  // we might be tempted to assert that:
-  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
-  //        "inconsistent argument?");
-  // However that wouldn't be right, because it's possible that
-  // a safepoint is indeed in progress as a younger generation
-  // stop-the-world GC happens even as we mark in this generation.
-
   _restart_for_overflow = false;
 
   _num_concurrent_workers = calc_active_marking_workers();
@@ -995,67 +978,135 @@
   // Parallel task terminator is set in "set_concurrency_and_phase()"
   set_concurrency_and_phase(active_workers, true /* concurrent */);
 
-  G1CMConcurrentMarkingTask marking_task(this, cm_thread());
+  G1CMConcurrentMarkingTask marking_task(this);
   _concurrent_workers->run_task(&marking_task);
   print_stats();
 }
 
-void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
-  // world is stopped at this checkpoint
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "world should be stopped");
-
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  // If a full collection has happened, we shouldn't do this.
+void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
+  G1HeapVerifier* verifier = _g1h->verifier();
+
+  verifier->verify_region_sets_optional();
+
+  if (VerifyDuringGC) {
+    GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
+
+    size_t const BufLen = 512;
+    char buffer[BufLen];
+
+    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
+    verifier->verify(type, vo, buffer);
+  }
+
+  verifier->check_bitmaps(caller);
+}
+
+class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
+  G1ConcurrentMark* _cm;
+
+  uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
+
+  void update_remset_before_rebuild(HeapRegion* hr) {
+    G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
+
+    size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
+    bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
+    if (selected_for_rebuild) {
+      _num_regions_selected_for_rebuild++;
+    }
+    _cm->update_top_at_rebuild_start(hr);
+  }
+
+public:
+  G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
+    _g1h(g1h), _cm(cm), _num_regions_selected_for_rebuild(0) { }
+
+  virtual bool do_heap_region(HeapRegion* r) {
+    update_remset_before_rebuild(r);
+    return false;
+  }
+
+  uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
+};
+
+class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
+public:
+  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
+
+  virtual bool do_heap_region(HeapRegion* r) {
+    _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
+    return false;
+  }
+};
+
+void G1ConcurrentMark::remark() {
+  assert_at_safepoint_on_vm_thread();
+
+  // If a full collection has happened, we should not continue. However we might
+  // have ended up here because the Remark VM operation was already scheduled.
   if (has_aborted()) {
-    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
     return;
   }
 
-  if (VerifyDuringGC) {
-    g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (before)");
-  }
-  g1h->verifier()->check_bitmaps("Remark Start");
-
-  G1Policy* g1p = g1h->g1_policy();
+  G1Policy* g1p = _g1h->g1_policy();
   g1p->record_concurrent_mark_remark_start();
 
   double start = os::elapsedTime();
 
-  checkpoint_roots_final_work();
+  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
+
+  {
+    GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
+    finalize_marking();
+  }
 
   double mark_work_end = os::elapsedTime();
 
-  weak_refs_work(clear_all_soft_refs);
-
-  if (has_overflown()) {
+  bool const mark_finished = !has_overflown();
+  if (mark_finished) {
+    weak_refs_work(false /* clear_all_soft_refs */);
+
+    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+    // We're done with marking.
+    // This is the end of the marking cycle; we expect all
+    // threads to have SATB queues with active set to true.
+    satb_mq_set.set_active_all_threads(false, /* new active value */
+                                       true /* expected_active */);
+
+    {
+      GCTraceTime(Debug, gc, phases)("Flush Task Caches");
+      flush_all_task_caches();
+    }
+
+    {
+      GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
+      G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
+      _g1h->heap_region_iterate(&cl);
+      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
+                                      _g1h->num_regions(), cl.num_selected_for_rebuild());
+    }
+
+    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "Remark after");
+
+    assert(!restart_for_overflow(), "sanity");
+    // Completely reset the marking state since marking completed
+    reset_at_marking_complete();
+  } else {
     // We overflowed.  Restart concurrent marking.
     _restart_for_overflow = true;
 
-    // Verify the heap w.r.t. the previous marking bitmap.
-    if (VerifyDuringGC) {
-      g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (overflow)");
-    }
+    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
 
     // Clear the marking state because we will be restarting
     // marking due to overflowing the global mark stack.
-    reset_marking_state();
-  } else {
-    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-    // We're done with marking.
-    // This is the end of  the marking cycle, we're expected all
-    // threads to have SATB queues with active set to true.
-    satb_mq_set.set_active_all_threads(false, /* new active value */
-                                       true /* expected_active */);
-
-    if (VerifyDuringGC) {
-      g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "During GC (after)");
-    }
-    g1h->verifier()->check_bitmaps("Remark End");
-    assert(!restart_for_overflow(), "sanity");
-    // Completely reset the marking state since marking completed
-    set_non_marking_state();
+    reset_marking_for_restart();
+  }
+
+  {
+    GCTraceTime(Debug, gc, phases)("Report Object Count");
+    report_object_count();
   }
 
   // Statistics
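The rebuild selection above delegates the per-region decision to the tracking policy; a plausible sketch of that decision (hypothetical, the real heuristic lives in G1RemSetTrackingPolicy, outside this hunk, and the threshold is an assumed knob):

    static bool update_before_rebuild_sketch(HeapRegion* hr,
                                             size_t live_bytes,
                                             size_t live_threshold_bytes) {
      // Only non-young regions with little enough live data are worth a
      // rebuilt remembered set: only those can later become collection
      // set candidates for mixed pauses.
      return !hr->is_young() && live_bytes < live_threshold_bytes;
    }
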
@@ -1065,99 +1116,85 @@
   _remark_times.add((now - start) * 1000.0);
 
   g1p->record_concurrent_mark_remark_end();
-
-  G1CMIsAliveClosure is_alive(g1h);
-  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
 }
 
-class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
-  G1CollectedHeap* _g1;
-  size_t _freed_bytes;
-  FreeRegionList* _local_cleanup_list;
-  uint _old_regions_removed;
-  uint _humongous_regions_removed;
-  HRRSCleanupTask* _hrrs_cleanup_task;
-
-public:
-  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
-                             FreeRegionList* local_cleanup_list,
-                             HRRSCleanupTask* hrrs_cleanup_task) :
-    _g1(g1),
-    _freed_bytes(0),
-    _local_cleanup_list(local_cleanup_list),
-    _old_regions_removed(0),
-    _humongous_regions_removed(0),
-    _hrrs_cleanup_task(hrrs_cleanup_task) { }
-
-  size_t freed_bytes() { return _freed_bytes; }
-  const uint old_regions_removed() { return _old_regions_removed; }
-  const uint humongous_regions_removed() { return _humongous_regions_removed; }
-
-  bool do_heap_region(HeapRegion *hr) {
-    _g1->reset_gc_time_stamps(hr);
-    hr->note_end_of_marking();
-
-    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
-      _freed_bytes += hr->used();
-      hr->set_containing_set(NULL);
-      if (hr->is_humongous()) {
-        _humongous_regions_removed++;
-        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
+class G1CleanupTask : public AbstractGangTask {
+  // Per-region work during the Cleanup pause.
+  class G1CleanupRegionsClosure : public HeapRegionClosure {
+    G1CollectedHeap* _g1h;
+    size_t _freed_bytes;
+    FreeRegionList* _local_cleanup_list;
+    uint _old_regions_removed;
+    uint _humongous_regions_removed;
+    HRRSCleanupTask* _hrrs_cleanup_task;
+
+  public:
+    G1CleanupRegionsClosure(G1CollectedHeap* g1,
+                            FreeRegionList* local_cleanup_list,
+                            HRRSCleanupTask* hrrs_cleanup_task) :
+      _g1h(g1),
+      _freed_bytes(0),
+      _local_cleanup_list(local_cleanup_list),
+      _old_regions_removed(0),
+      _humongous_regions_removed(0),
+      _hrrs_cleanup_task(hrrs_cleanup_task) { }
+
+    size_t freed_bytes() { return _freed_bytes; }
+    const uint old_regions_removed() { return _old_regions_removed; }
+    const uint humongous_regions_removed() { return _humongous_regions_removed; }
+
+    bool do_heap_region(HeapRegion* hr) {
+      hr->note_end_of_marking();
+
+      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
+        _freed_bytes += hr->used();
+        hr->set_containing_set(NULL);
+        if (hr->is_humongous()) {
+          _humongous_regions_removed++;
+          _g1h->free_humongous_region(hr, _local_cleanup_list);
+        } else {
+          _old_regions_removed++;
+          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
+        }
+        hr->clear_cardtable();
+        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
+        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
       } else {
-        _old_regions_removed++;
-        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
+        hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
       }
-    } else {
-      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
+
+      return false;
     }
-
-    return false;
-  }
-};
-
-class G1ParNoteEndTask: public AbstractGangTask {
-  friend class G1NoteEndOfConcMarkClosure;
-
-protected:
+  };
+
   G1CollectedHeap* _g1h;
   FreeRegionList* _cleanup_list;
   HeapRegionClaimer _hrclaimer;
 
 public:
-  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
-      AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
+  G1CleanupTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
+    AbstractGangTask("G1 Cleanup"),
+    _g1h(g1h),
+    _cleanup_list(cleanup_list),
+    _hrclaimer(n_workers) {
+
+    HeapRegionRemSet::reset_for_cleanup_tasks();
   }
 
   void work(uint worker_id) {
     FreeRegionList local_cleanup_list("Local Cleanup List");
     HRRSCleanupTask hrrs_cleanup_task;
-    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
-                                           &hrrs_cleanup_task);
-    _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
-    assert(g1_note_end.is_complete(), "Shouldn't have yielded!");
-
-    // Now update the lists
-    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
+    G1CleanupRegionsClosure cl(_g1h,
+                               &local_cleanup_list,
+                               &hrrs_cleanup_task);
+    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
+    assert(cl.is_complete(), "Shouldn't have aborted!");
+
+    // Now update the old/humongous region sets
+    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
     {
       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
-
-      // If we iterate over the global cleanup list at the end of
-      // cleanup to do this printing we will not guarantee to only
-      // generate output for the newly-reclaimed regions (the list
-      // might not be empty at the beginning of cleanup; we might
-      // still be working on its previous contents). So we do the
-      // printing here, before we append the new regions to the global
-      // cleanup list.
-
-      G1HRPrinter* hr_printer = _g1h->hr_printer();
-      if (hr_printer->is_active()) {
-        FreeRegionListIterator iter(&local_cleanup_list);
-        while (iter.more_available()) {
-          HeapRegion* hr = iter.get_next();
-          hr_printer->cleanup(hr);
-        }
-      }
+      _g1h->decrement_summary_bytes(cl.freed_bytes());
 
       _cleanup_list->add_ordered(&local_cleanup_list);
       assert(local_cleanup_list.is_empty(), "post-condition");
@@ -1167,164 +1204,92 @@
   }
 };
 
+void G1ConcurrentMark::reclaim_empty_regions() {
+  WorkGang* workers = _g1h->workers();
+  FreeRegionList empty_regions_list("Empty Regions After Mark List");
+
+  G1CleanupTask cl(_g1h, &empty_regions_list, workers->active_workers());
+  workers->run_task(&cl);
+
+  if (!empty_regions_list.is_empty()) {
+    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
+    // Now print the empty regions list.
+    G1HRPrinter* hrp = _g1h->hr_printer();
+    if (hrp->is_active()) {
+      FreeRegionListIterator iter(&empty_regions_list);
+      while (iter.more_available()) {
+        HeapRegion* hr = iter.get_next();
+        hrp->cleanup(hr);
+      }
+    }
+    // And actually make them available.
+    _g1h->prepend_to_freelist(&empty_regions_list);
+  }
+}
+
 void G1ConcurrentMark::cleanup() {
-  // world is stopped at this checkpoint
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "world should be stopped");
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  assert_at_safepoint_on_vm_thread();
 
   // If a full collection has happened, we shouldn't do this.
   if (has_aborted()) {
-    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
     return;
   }
 
-  g1h->verifier()->verify_region_sets_optional();
-
-  if (VerifyDuringGC) {
-    g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (before)");
-  }
-  g1h->verifier()->check_bitmaps("Cleanup Start");
-
-  G1Policy* g1p = g1h->g1_policy();
+  G1Policy* g1p = _g1h->g1_policy();
   g1p->record_concurrent_mark_cleanup_start();
 
   double start = os::elapsedTime();
 
-  HeapRegionRemSet::reset_for_cleanup_tasks();
+  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UseNextMarking, "Cleanup before");
 
   {
-    GCTraceTime(Debug, gc)("Finalize Live Data");
-    finalize_live_data();
+    GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
+    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
+    _g1h->heap_region_iterate(&cl);
   }
 
-  if (VerifyDuringGC) {
-    GCTraceTime(Debug, gc)("Verify Live Data");
-    verify_live_data();
-  }
-
-  g1h->collector_state()->set_mark_in_progress(false);
-
-  double count_end = os::elapsedTime();
-  double this_final_counting_time = (count_end - start);
-  _total_counting_time += this_final_counting_time;
-
   if (log_is_enabled(Trace, gc, liveness)) {
-    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
+    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
     _g1h->heap_region_iterate(&cl);
   }
 
-  // Install newly created mark bitMap as "prev".
+  // Install newly created mark bitmap as "prev".
   swap_mark_bitmaps();
-
-  g1h->reset_gc_time_stamp();
-
-  uint n_workers = _g1h->workers()->active_workers();
-
-  // Note end of marking in all heap regions.
-  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
-  g1h->workers()->run_task(&g1_par_note_end_task);
-  g1h->check_gc_time_stamps();
-
-  if (!cleanup_list_is_empty()) {
-    // The cleanup list is not empty, so we'll have to process it
-    // concurrently. Notify anyone else that might be wanting free
-    // regions that there will be more free regions coming soon.
-    g1h->set_free_regions_coming();
+  {
+    GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
+    reclaim_empty_regions();
   }
 
-  // call below, since it affects the metric by which we sort the heap
-  // regions.
-  if (G1ScrubRemSets) {
-    double rs_scrub_start = os::elapsedTime();
-    g1h->scrub_rem_set();
-    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
-  }
-
-  // this will also free any regions totally full of garbage objects,
-  // and sort the regions.
-  g1h->g1_policy()->record_concurrent_mark_cleanup_end();
-
-  // Statistics.
-  double end = os::elapsedTime();
-  _cleanup_times.add((end - start) * 1000.0);
-
-  // Clean up will have freed any regions completely full of garbage.
+  // Cleanup will have freed any regions completely full of garbage.
   // Update the soft reference policy with the new heap occupancy.
   Universe::update_heap_info_at_gc();
 
-  if (VerifyDuringGC) {
-    g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (after)");
-  }
-
-  g1h->verifier()->check_bitmaps("Cleanup End");
-
-  g1h->verifier()->verify_region_sets_optional();
-
-  // We need to make this be a "collection" so any collection pause that
-  // races with it goes around and waits for completeCleanup to finish.
-  g1h->increment_total_collections();
-
   // Clean out dead classes and update Metaspace sizes.
   if (ClassUnloadingWithConcurrentMark) {
+    GCTraceTime(Debug, gc, phases)("Purge Metaspace");
     ClassLoaderDataGraph::purge();
   }
   MetaspaceGC::compute_new_size();
 
   // We reclaimed old regions so we should calculate the sizes to make
   // sure we update the old gen/space data.
-  g1h->g1mm()->update_sizes();
-}
-
-void G1ConcurrentMark::complete_cleanup() {
-  if (has_aborted()) return;
-
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  _cleanup_list.verify_optional();
-  FreeRegionList tmp_free_list("Tmp Free List");
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
-                                  "cleanup list has %u entries",
-                                  _cleanup_list.length());
-
-  // No one else should be accessing the _cleanup_list at this point,
-  // so it is not necessary to take any locks
-  while (!_cleanup_list.is_empty()) {
-    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
-    assert(hr != NULL, "Got NULL from a non-empty list");
-    hr->par_clear();
-    tmp_free_list.add_ordered(hr);
-
-    // Instead of adding one region at a time to the secondary_free_list,
-    // we accumulate them in the local list and move them a few at a
-    // time. This also cuts down on the number of notify_all() calls
-    // we do during this process. We'll also append the local list when
-    // _cleanup_list is empty (which means we just removed the last
-    // region from the _cleanup_list).
-    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
-        _cleanup_list.is_empty()) {
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
-                                      "appending %u entries to the secondary_free_list, "
-                                      "cleanup list still has %u entries",
-                                      tmp_free_list.length(),
-                                      _cleanup_list.length());
-
-      {
-        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-        g1h->secondary_free_list_add(&tmp_free_list);
-        SecondaryFreeList_lock->notify_all();
-      }
-#ifndef PRODUCT
-      if (G1StressConcRegionFreeing) {
-        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
-          os::sleep(Thread::current(), (jlong) 1, false);
-        }
-      }
-#endif
-    }
+  _g1h->g1mm()->update_sizes();
+
+  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
+
+  // We need to make this be a "collection" so any collection pause that
+  // races with it goes around and waits for Cleanup to finish.
+  _g1h->increment_total_collections();
+
+  // Local statistics
+  double recent_cleanup_time = (os::elapsedTime() - start);
+  _total_cleanup_time += recent_cleanup_time;
+  _cleanup_times.add(recent_cleanup_time);
+
+  {
+    GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup");
+    _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
   }
-  assert(tmp_free_list.is_empty(), "post-condition");
 }
 
 // Supporting Object and Oop closures for reference discovery
@@ -1333,7 +1298,7 @@
 bool G1CMIsAliveClosure::do_object_b(oop obj) {
   HeapWord* addr = (HeapWord*)obj;
   return addr != NULL &&
-         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
+         (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_ill(obj));
 }
 
 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
@@ -1348,13 +1313,13 @@
 // of the workers interfering with each other that could occur if
 // operating on the global stack.
 
-class G1CMKeepAliveAndDrainClosure: public OopClosure {
+class G1CMKeepAliveAndDrainClosure : public OopClosure {
   G1ConcurrentMark* _cm;
   G1CMTask*         _task;
   int               _ref_counter_limit;
   int               _ref_counter;
   bool              _is_serial;
- public:
+public:
   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
     _cm(cm), _task(task), _is_serial(is_serial),
     _ref_counter_limit(G1RefProcDrainInterval) {
@@ -1368,8 +1333,7 @@
 
   template <class T> void do_oop_work(T* p) {
     if (!_cm->has_overflown()) {
-      oop obj = oopDesc::load_decode_heap_oop(p);
-      _task->deal_with_reference(obj);
+      _task->deal_with_reference(p);
       _ref_counter--;
 
       if (_ref_counter == 0) {
@@ -1408,7 +1372,7 @@
 // to drain the marking data structures of the remaining entries
 // added by the 'keep alive' oop closure above.
 
-class G1CMDrainMarkingStackClosure: public VoidClosure {
+class G1CMDrainMarkingStackClosure : public VoidClosure {
   G1ConcurrentMark* _cm;
   G1CMTask*         _task;
   bool              _is_serial;
@@ -1447,7 +1411,7 @@
 // Implementation of AbstractRefProcTaskExecutor for parallel
 // reference processing at the end of G1 concurrent marking
 
-class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
 private:
   G1CollectedHeap*  _g1h;
   G1ConcurrentMark* _cm;
@@ -1467,7 +1431,7 @@
   virtual void execute(EnqueueTask& task);
 };
 
-class G1CMRefProcTaskProxy: public AbstractGangTask {
+class G1CMRefProcTaskProxy : public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   ProcessTask&      _proc_task;
   G1CollectedHeap*  _g1h;
@@ -1509,7 +1473,7 @@
   _workers->run_task(&proc_task_proxy);
 }
 
-class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
+class G1CMRefEnqueueTaskProxy : public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
   EnqueueTask& _enq_task;
 
@@ -1540,30 +1504,18 @@
 }
 
 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
-  if (has_overflown()) {
-    // Skip processing the discovered references if we have
-    // overflown the global marking stack. Reference objects
-    // only get discovered once so it is OK to not
-    // de-populate the discovered reference lists. We could have,
-    // but the only benefit would be that, when marking restarts,
-    // less reference objects are discovered.
-    return;
-  }
-
   ResourceMark rm;
   HandleMark   hm;
 
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
   // Is alive closure.
-  G1CMIsAliveClosure g1_is_alive(g1h);
+  G1CMIsAliveClosure g1_is_alive(_g1h);
 
   // Inner scope to exclude the cleaning of the string and symbol
   // tables from the displayed time.
   {
     GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
 
-    ReferenceProcessor* rp = g1h->ref_processor_cm();
+    ReferenceProcessor* rp = _g1h->ref_processor_cm();
 
     // See the comment in G1CollectedHeap::ref_processing_init()
     // about how reference processing currently works in G1.
@@ -1594,12 +1546,12 @@
     // otherwise we use the work gang from the G1CollectedHeap and
     // we utilize all the worker threads we can.
     bool processing_is_mt = rp->processing_is_mt();
-    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
+    uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
     active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
 
     // Parallel processing task executor.
-    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
-                                              g1h->workers(), active_workers);
+    G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
+                                              _g1h->workers(), active_workers);
     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
 
     // Set the concurrency level. The phase was already set prior to
@@ -1629,7 +1581,7 @@
     // global marking stack.
 
     assert(has_overflown() || _global_mark_stack.is_empty(),
-            "Mark stack should be empty (unless it has overflown)");
+           "Mark stack should be empty (unless it has overflown)");
 
     assert(rp->num_q() == active_workers, "why not");
 
@@ -1643,7 +1595,7 @@
   }
 
   assert(has_overflown() || _global_mark_stack.is_empty(),
-          "Mark stack should be empty (unless it has overflown)");
+         "Mark stack should be empty (unless it has overflown)");
 
   {
     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
@@ -1661,20 +1613,25 @@
   if (ClassUnloadingWithConcurrentMark) {
     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
     bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
-    g1h->complete_cleaning(&g1_is_alive, purged_classes);
+    _g1h->complete_cleaning(&g1_is_alive, purged_classes);
   } else {
     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
     // No need to clean string table and symbol table as they are treated as strong roots when
     // class unloading is disabled.
-    g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
-
+    _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
   }
 }
 
+void G1ConcurrentMark::report_object_count() {
+  G1CMIsAliveClosure is_alive(_g1h);
+  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
+}
+
 void G1ConcurrentMark::swap_mark_bitmaps() {
   G1CMBitMap* temp = _prev_mark_bitmap;
   _prev_mark_bitmap = _next_mark_bitmap;
   _next_mark_bitmap = temp;
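+  // The old prev bitmap is now the next bitmap; it still holds stale marks and must be cleared before the next cycle starts.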
+  _g1h->collector_state()->set_clearing_next_bitmap(true);
 }
 
 // Closure for marking entries in SATB buffers.
@@ -1712,7 +1669,7 @@
  public:
   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
     _cm_satb_cl(task, g1h),
-    _cm_cl(g1h, g1h->concurrent_mark(), task),
+    _cm_cl(g1h, task),
     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
     _thread_parity(Threads::thread_claim_parity()) {}
 
@@ -1739,8 +1696,7 @@
   }
 };
 
-class G1CMRemarkTask: public AbstractGangTask {
-private:
+class G1CMRemarkTask : public AbstractGangTask {
   G1ConcurrentMark* _cm;
 public:
   void work(uint worker_id) {
@@ -1770,17 +1726,14 @@
   }
 };
 
-void G1ConcurrentMark::checkpoint_roots_final_work() {
+void G1ConcurrentMark::finalize_marking() {
   ResourceMark rm;
   HandleMark   hm;
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
-
-  g1h->ensure_parsability(false);
+
+  _g1h->ensure_parsability(false);
 
   // this is remark, so we'll use up all active threads
-  uint active_workers = g1h->workers()->active_workers();
+  uint active_workers = _g1h->workers()->active_workers();
   set_concurrency_and_phase(active_workers, false /* concurrent */);
   // Leave _parallel_marking_threads at its
   // value originally calculated in the G1ConcurrentMark
@@ -1794,7 +1747,7 @@
     // We will start all available threads, even if we decide that the
     // active_workers will be fewer. The extra ones will just bail out
     // immediately.
-    g1h->workers()->run_task(&remarkTask);
+    _g1h->workers()->run_task(&remarkTask);
   }
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
@@ -1807,6 +1760,19 @@
   print_stats();
 }
 
+void G1ConcurrentMark::flush_all_task_caches() {
+  size_t hits = 0;
+  size_t misses = 0;
+  for (uint i = 0; i < _max_num_tasks; i++) {
+    Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
+    hits += stats.first;
+    misses += stats.second;
+  }
+  size_t sum = hits + misses;
+  log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
+                       hits, misses, percent_of(hits, sum));
+}
+
 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
   _prev_mark_bitmap->clear_range(mr);
 }
@@ -1816,9 +1782,7 @@
   // "checkpoint" the finger
   HeapWord* finger = _finger;
 
-  // _heap_end will not change underneath our feet; it only changes at
-  // yield points.
-  while (finger < _heap_end) {
+  while (finger < _heap.end()) {
     assert(_g1h->is_in_g1_reserved(finger), "invariant");
 
     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
@@ -1860,7 +1824,6 @@
 
 #ifndef PRODUCT
 class VerifyNoCSetOops {
-private:
   G1CollectedHeap* _g1h;
   const char* _phase;
   int _info;
@@ -1888,7 +1851,7 @@
 
 void G1ConcurrentMark::verify_no_cset_oops() {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
-  if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
+  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
     return;
   }
 
@@ -1903,7 +1866,7 @@
 
   // Verify the global finger
   HeapWord* global_finger = finger();
-  if (global_finger != NULL && global_finger < _heap_end) {
+  if (global_finger != NULL && global_finger < _heap.end()) {
     // Since we always iterate over all regions, we might get a NULL HeapRegion
     // here.
     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
@@ -1917,7 +1880,7 @@
   for (uint i = 0; i < _num_concurrent_workers; ++i) {
     G1CMTask* task = _tasks[i];
     HeapWord* task_finger = task->finger();
-    if (task_finger != NULL && task_finger < _heap_end) {
+    if (task_finger != NULL && task_finger < _heap.end()) {
       // See above note on the global finger verification.
       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
@@ -1928,28 +1891,11 @@
   }
 }
 #endif // PRODUCT
-void G1ConcurrentMark::create_live_data() {
-  _g1h->g1_rem_set()->create_card_live_data(_concurrent_workers, _next_mark_bitmap);
-}
-
-void G1ConcurrentMark::finalize_live_data() {
-  _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _next_mark_bitmap);
-}
-
-void G1ConcurrentMark::verify_live_data() {
-  _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _next_mark_bitmap);
+
+void G1ConcurrentMark::rebuild_rem_set_concurrently() {
+  _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
 }
 
-void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
-  _g1h->g1_rem_set()->clear_card_live_data(workers);
-}
-
-#ifdef ASSERT
-void G1ConcurrentMark::verify_live_data_clear() {
-  _g1h->g1_rem_set()->verify_card_live_data_is_clear();
-}
-#endif
-
 void G1ConcurrentMark::print_stats() {
   if (!log_is_enabled(Debug, gc, stats)) {
     return;
@@ -1961,7 +1907,7 @@
   }
 }
 
-void G1ConcurrentMark::abort() {
+void G1ConcurrentMark::concurrent_cycle_abort() {
   if (!cm_thread()->during_cycle() || _has_aborted) {
     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
     return;
@@ -1977,16 +1923,8 @@
   // since VerifyDuringGC verifies the objects marked during
   // a full GC against the previous bitmap.
 
-  {
-    GCTraceTime(Debug, gc)("Clear Live Data");
-    clear_live_data(_g1h->workers());
-  }
-  DEBUG_ONLY({
-    GCTraceTime(Debug, gc)("Verify Live Data Clear");
-    verify_live_data_clear();
-  })
   // Empty mark stack
-  reset_marking_state();
+  reset_marking_for_restart();
   for (uint i = 0; i < _max_num_tasks; ++i) {
     _tasks[i]->clear_region_fields();
   }
@@ -2029,11 +1967,7 @@
   }
   print_ms_time_info("  ", "cleanups", _cleanup_times);
   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
-            _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
-  if (G1ScrubRemSets) {
-    log.trace("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
-              _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
-  }
+            _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
   log.trace("  Total stop_world time = %8.2f s.",
             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
@@ -2062,10 +1996,9 @@
 }
 
 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
-                               G1ConcurrentMark* cm,
                                G1CMTask* task)
   : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
-    _g1h(g1h), _cm(cm), _task(task)
+    _g1h(g1h), _task(task)
 { }
 
 void G1CMTask::setup_for_region(HeapRegion* hr) {
@@ -2139,6 +2072,8 @@
   _elapsed_time_ms               = 0.0;
   _termination_time_ms           = 0.0;
   _termination_start_time_ms     = 0.0;
+
+  _mark_stats_cache.reset();
 }
 
 bool G1CMTask::should_exit_termination() {
@@ -2157,7 +2092,9 @@
 }
 
 void G1CMTask::regular_clock_call() {
-  if (has_aborted()) return;
+  if (has_aborted()) {
+    return;
+  }
 
   // First, we need to recalculate the words scanned and refs reached
   // limits for the next clock call.
@@ -2174,7 +2111,7 @@
   // If we are not concurrent (i.e. we're doing remark) we don't need
   // to check anything else. The other steps are only needed during
   // the concurrent marking phase.
-  if (!_concurrent) {
+  if (!_cm->concurrent()) {
     return;
   }
 
@@ -2314,7 +2251,9 @@
 }
 
 void G1CMTask::drain_global_stack(bool partially) {
-  if (has_aborted()) return;
+  if (has_aborted()) {
+    return;
+  }
 
   // We have a policy to drain the local queue before we attempt to
   // drain the global stack.
@@ -2347,7 +2286,9 @@
 // replicated. We should really get rid of the single-threaded version
 // of the code to simplify things.
 void G1CMTask::drain_satb_buffers() {
-  if (has_aborted()) return;
+  if (has_aborted()) {
+    return;
+  }
 
   // We set this so that the regular clock knows that we're in the
   // middle of draining buffers and doesn't set the abort flag when it
@@ -2368,7 +2309,7 @@
   _draining_satb_buffers = false;
 
   assert(has_aborted() ||
-         _concurrent ||
+         _cm->concurrent() ||
          satb_mq_set.completed_buffers_num() == 0, "invariant");
 
   // again, this was a potentially expensive operation, decrease the
@@ -2376,16 +2317,28 @@
   decrease_limits();
 }
 
+void G1CMTask::clear_mark_stats_cache(uint region_idx) {
+  _mark_stats_cache.reset(region_idx);
+}
+
+Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
+  return _mark_stats_cache.evict_all();
+}
+
 void G1CMTask::print_stats() {
-  log_debug(gc, stats)("Marking Stats, task = %u, calls = %u",
-                       _worker_id, _calls);
+  log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                        _elapsed_time_ms, _termination_time_ms);
-  log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
-                       _step_times_ms.num(), _step_times_ms.avg(),
-                       _step_times_ms.sd());
-  log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
-                       _step_times_ms.maximum(), _step_times_ms.sum());
+  log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
+                       _step_times_ms.num(),
+                       _step_times_ms.avg(),
+                       _step_times_ms.sd(),
+                       _step_times_ms.maximum(),
+                       _step_times_ms.sum());
+  size_t const hits = _mark_stats_cache.hits();
+  size_t const misses = _mark_stats_cache.misses();
+  log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
+                       hits, misses, percent_of(hits, hits + misses));
 }
 
 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) {
@@ -2511,7 +2464,6 @@
                                bool do_termination,
                                bool is_serial) {
   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
-  assert(_concurrent == _cm->concurrent(), "they should be the same");
 
   _start_time_ms = os::elapsedVTime() * 1000.0;
 
@@ -2541,7 +2493,7 @@
   // eventually called from this method, so it is OK to allocate these
   // statically.
   G1CMBitMapClosure bitmap_closure(this, _cm);
-  G1CMOopClosure    cm_oop_closure(_g1h, _cm, this);
+  G1CMOopClosure cm_oop_closure(_g1h, this);
   set_cm_oop_closure(&cm_oop_closure);
 
   if (_cm->has_overflown()) {
@@ -2731,17 +2683,6 @@
     if (finished) {
       // We're all done.
 
-      if (_worker_id == 0) {
-        // Let's allow task 0 to do this
-        if (_concurrent) {
-          assert(_cm->concurrent_marking_in_progress(), "invariant");
-          // We need to set this to false before the next
-          // safepoint. This way we ensure that the marking phase
-          // doesn't observe any more heap expansions.
-          _cm->clear_concurrent_marking_in_progress();
-        }
-      }
-
       // We can now guarantee that the global stack is empty, since
       // all other tasks have finished. We separated the guarantees so
       // that, if a condition is false, we can immediately find out
@@ -2791,14 +2732,29 @@
 
         // When we exit this sync barrier we know that all tasks have
         // stopped doing marking work. So, it's now safe to
-        // re-initialize our data structures. At the end of this method,
-        // task 0 will clear the global data structures.
+        // re-initialize our data structures.
       }
 
-      // We clear the local state of this task...
       clear_region_fields();
+      flush_mark_stats_cache();
 
       if (!is_serial) {
+        // If we're executing the concurrent phase of marking, reset the marking
+        // state; otherwise the marking state is reset after reference processing,
+        // during the remark pause.
+        // If we reset here as a result of an overflow during the remark we will
+        // see assertion failures from any subsequent set_concurrency_and_phase()
+        // calls.
+        if (_cm->concurrent() && _worker_id == 0) {
+          // Worker 0 is responsible for clearing the global data structures because
+          // of an overflow. During STW we should not clear the overflow flag (in
+          // G1ConcurrentMark::reset_marking_for_restart()) since we rely on it being true when we exit
+          // this method to abort the pause and restart concurrent marking.
+          _cm->reset_marking_for_restart();
+
+          log_info(gc, marking)("Concurrent Mark reset for overflow");
+        }
+
         // ...and enter the second barrier.
         _cm->enter_second_sync_barrier(_worker_id);
       }
@@ -2809,13 +2765,18 @@
   }
 }
 
-G1CMTask::G1CMTask(uint worker_id, G1ConcurrentMark* cm, G1CMTaskQueue* task_queue) :
+G1CMTask::G1CMTask(uint worker_id,
+                   G1ConcurrentMark* cm,
+                   G1CMTaskQueue* task_queue,
+                   G1RegionMarkStats* mark_stats,
+                   uint max_regions) :
   _objArray_processor(this),
   _worker_id(worker_id),
   _g1h(G1CollectedHeap::heap()),
   _cm(cm),
   _next_mark_bitmap(NULL),
   _task_queue(task_queue),
+  _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
   _calls(0),
   _time_target_ms(0.0),
   _start_time_ms(0.0),
@@ -2837,7 +2798,6 @@
   _elapsed_time_ms(0.0),
   _termination_time_ms(0.0),
   _termination_start_time_ms(0.0),
-  _concurrent(false),
   _marking_step_diffs_ms()
 {
   guarantee(task_queue != NULL, "invariant");
@@ -2866,6 +2826,8 @@
 // For per-region info
 #define G1PPRL_TYPE_FORMAT            "   %-4s"
 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
+#define G1PPRL_STATE_FORMAT           "   %-5s"
+#define G1PPRL_STATE_H_FORMAT         "   %5s"
 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
@@ -2902,10 +2864,11 @@
                           G1PPRL_BYTE_H_FORMAT
                           G1PPRL_DOUBLE_H_FORMAT
                           G1PPRL_BYTE_H_FORMAT
+                          G1PPRL_STATE_H_FORMAT
                           G1PPRL_BYTE_H_FORMAT,
                           "type", "address-range",
                           "used", "prev-live", "next-live", "gc-eff",
-                          "remset", "code-roots");
+                          "remset", "state", "code-roots");
   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                           G1PPRL_TYPE_H_FORMAT
                           G1PPRL_ADDR_BASE_H_FORMAT
@@ -2914,10 +2877,11 @@
                           G1PPRL_BYTE_H_FORMAT
                           G1PPRL_DOUBLE_H_FORMAT
                           G1PPRL_BYTE_H_FORMAT
+                          G1PPRL_STATE_H_FORMAT
                           G1PPRL_BYTE_H_FORMAT,
                           "", "",
                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
-                          "(bytes)", "(bytes)");
+                          "(bytes)", "", "(bytes)");
 }
 
 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
@@ -2931,6 +2895,7 @@
   double gc_eff          = r->gc_efficiency();
   size_t remset_bytes    = r->rem_set()->mem_size();
   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
+  const char* remset_type = r->rem_set()->get_short_state_str();
 
   _total_used_bytes      += used_bytes;
   _total_capacity_bytes  += capacity_bytes;
@@ -2948,10 +2913,11 @@
                           G1PPRL_BYTE_FORMAT
                           G1PPRL_DOUBLE_FORMAT
                           G1PPRL_BYTE_FORMAT
+                          G1PPRL_STATE_FORMAT
                           G1PPRL_BYTE_FORMAT,
                           type, p2i(bottom), p2i(end),
                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
-                          remset_bytes, strong_code_roots_bytes);
+                          remset_bytes, remset_type, strong_code_roots_bytes);
 
   return false;
 }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,12 +27,14 @@
 
 #include "gc/g1/g1ConcurrentMarkBitMap.hpp"
 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
+#include "gc/g1/g1HeapVerifier.hpp"
+#include "gc/g1/g1RegionMarkStatsCache.hpp"
 #include "gc/g1/heapRegionSet.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "memory/allocation.hpp"
 
 class ConcurrentGCTimer;
-class ConcurrentMarkThread;
+class G1ConcurrentMarkThread;
 class G1CollectedHeap;
 class G1CMTask;
 class G1ConcurrentMark;
@@ -103,10 +105,10 @@
 // to determine if referents of discovered reference objects
 // are alive. An instance is also embedded into the
 // reference processor as the _is_alive_non_header field
-class G1CMIsAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
+class G1CMIsAliveClosure : public BoolObjectClosure {
+  G1CollectedHeap* _g1h;
  public:
-  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
+  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1h(g1) { }
 
   bool do_object_b(oop obj);
 };
@@ -275,8 +277,8 @@
 
 // This class manages data structures and methods for doing liveness analysis in
 // G1's concurrent cycle.
-class G1ConcurrentMark: public CHeapObj<mtGC> {
-  friend class ConcurrentMarkThread;
+class G1ConcurrentMark : public CHeapObj<mtGC> {
+  friend class G1ConcurrentMarkThread;
   friend class G1CMRefProcTaskProxy;
   friend class G1CMRefProcTaskExecutor;
   friend class G1CMKeepAliveAndDrainClosure;
@@ -286,37 +288,35 @@
   friend class G1CMRemarkTask;
   friend class G1CMTask;
 
-  ConcurrentMarkThread*  _cm_thread;     // The thread doing the work
-  G1CollectedHeap*       _g1h;           // The heap
-  bool                   _completed_initialization; // Set to true when initialization is complete
-
-  FreeRegionList         _cleanup_list;
+  G1ConcurrentMarkThread* _cm_thread;     // The thread doing the work
+  G1CollectedHeap*        _g1h;           // The heap
+  bool                    _completed_initialization; // Set to true when initialization is complete
 
   // Concurrent marking support structures
-  G1CMBitMap             _mark_bitmap_1;
-  G1CMBitMap             _mark_bitmap_2;
-  G1CMBitMap*            _prev_mark_bitmap; // Completed mark bitmap
-  G1CMBitMap*            _next_mark_bitmap; // Under-construction mark bitmap
+  G1CMBitMap              _mark_bitmap_1;
+  G1CMBitMap              _mark_bitmap_2;
+  G1CMBitMap*             _prev_mark_bitmap; // Completed mark bitmap
+  G1CMBitMap*             _next_mark_bitmap; // Under-construction mark bitmap
 
   // Heap bounds
-  HeapWord*              _heap_start;
-  HeapWord*              _heap_end;
+  MemRegion const         _heap;
 
   // Root region tracking and claiming
-  G1CMRootRegions        _root_regions;
+  G1CMRootRegions         _root_regions;
 
   // For grey objects
-  G1CMMarkStack          _global_mark_stack; // Grey objects behind global finger
-  HeapWord* volatile     _finger;            // The global finger, region aligned,
-                                             // always pointing to the end of the
-                                             // last claimed region
+  G1CMMarkStack           _global_mark_stack; // Grey objects behind global finger
+  HeapWord* volatile      _finger;            // The global finger, region aligned,
+                                              // always pointing to the end of the
+                                              // last claimed region
 
-  uint                   _max_num_tasks;    // Maximum number of marking tasks
-  uint                   _num_active_tasks; // Number of tasks currently active
-  G1CMTask**             _tasks;            // Task queue array (max_worker_id length)
+  uint                    _worker_id_offset;
+  uint                    _max_num_tasks;    // Maximum number of marking tasks
+  uint                    _num_active_tasks; // Number of tasks currently active
+  G1CMTask**              _tasks;            // Task queue array (max_worker_id length)
 
-  G1CMTaskQueueSet*      _task_queues;      // Task queue set
-  ParallelTaskTerminator _terminator;       // For termination
+  G1CMTaskQueueSet*       _task_queues;      // Task queue set
+  ParallelTaskTerminator  _terminator;       // For termination
 
   // Two sync barriers that are used to synchronize tasks when an
   // overflow occurs. The algorithm is the following. All tasks enter
@@ -327,30 +327,24 @@
   // ensure, that no task starts doing work before all data
   // structures (local and global) have been re-initialized. When they
   // exit it, they are free to start working again.
-  WorkGangBarrierSync    _first_overflow_barrier_sync;
-  WorkGangBarrierSync    _second_overflow_barrier_sync;
+  WorkGangBarrierSync     _first_overflow_barrier_sync;
+  WorkGangBarrierSync     _second_overflow_barrier_sync;
 
   // This is set by any task, when an overflow on the global data
   // structures is detected
-  volatile bool          _has_overflown;
+  volatile bool           _has_overflown;
   // True: marking is concurrent, false: we're in remark
-  volatile bool          _concurrent;
+  volatile bool           _concurrent;
   // Set at the end of a Full GC so that marking aborts
-  volatile bool          _has_aborted;
+  volatile bool           _has_aborted;
 
   // Used when remark aborts due to an overflow to indicate that
   // another concurrent marking phase should start
-  volatile bool          _restart_for_overflow;
+  volatile bool           _restart_for_overflow;
 
-  // This is true from the very start of concurrent marking until the
-  // point when all the tasks complete their work. It is really used
-  // to determine the points between the end of concurrent marking and
-  // time of remark.
-  volatile bool          _concurrent_marking_in_progress;
+  ConcurrentGCTimer*      _gc_timer_cm;
 
-  ConcurrentGCTimer*     _gc_timer_cm;
-
-  G1OldTracer*           _gc_tracer_cm;
+  G1OldTracer*            _gc_tracer_cm;
 
   // Timing statistics. All of them are in ms
   NumberSeq _init_times;
@@ -358,8 +352,7 @@
   NumberSeq _remark_mark_times;
   NumberSeq _remark_weak_ref_times;
   NumberSeq _cleanup_times;
-  double    _total_counting_time;
-  double    _total_rs_scrub_time;
+  double    _total_cleanup_time;
 
   double*   _accum_task_vtime;   // Accumulated task vtime
 
@@ -367,22 +360,34 @@
   uint      _num_concurrent_workers; // The number of marking worker threads we're using
   uint      _max_concurrent_workers; // Maximum number of marking worker threads
 
+  void verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller);
+
+  void finalize_marking();
+
   void weak_refs_work_parallel_part(BoolObjectClosure* is_alive, bool purged_classes);
   void weak_refs_work(bool clear_all_soft_refs);
 
+  void report_object_count();
+
   void swap_mark_bitmaps();
 
+  void reclaim_empty_regions();
+
+  // Clear statistics gathered during the concurrent cycle for the given region after
+  // it has been reclaimed.
+  void clear_statistics(HeapRegion* r);
+
   // Resets the global marking data structures, as well as the
   // task local ones; should be called during initial mark.
   void reset();
 
   // Resets all the marking data structures. Called when we have to restart
   // marking or when marking completes (via set_non_marking_state below).
-  void reset_marking_state();
+  void reset_marking_for_restart();
 
   // We do this after we're done with marking so that the marking data
   // structures are initialized to a sensible and predictable state.
-  void set_non_marking_state();
+  void reset_at_marking_complete();
 
   // Called to indicate how many threads are currently active.
   void set_concurrency(uint active_tasks);
@@ -394,10 +399,6 @@
   // Prints all gathered CM-related statistics
   void print_stats();
 
-  bool cleanup_list_is_empty() {
-    return _cleanup_list.is_empty();
-  }
-
   HeapWord*               finger()          { return _finger;   }
   bool                    concurrent()      { return _concurrent; }
   uint                    active_tasks()    { return _num_active_tasks; }
@@ -424,11 +425,13 @@
   // to satisfy an allocation without doing a GC. This is fine, because all
   // objects in those regions will be considered live anyway because of
   // SATB guarantees (i.e. their TAMS will be equal to bottom).
-  bool out_of_regions() { return _finger >= _heap_end; }
+  bool out_of_regions() { return _finger >= _heap.end(); }
 
   // Returns the task with the given id
   G1CMTask* task(uint id) {
-    assert(id < _num_active_tasks, "Task id %u not within active bounds up to %u", id, _num_active_tasks);
+    // During initial mark we use the parallel gc threads to do some work, so
+    // we can only compare against _max_num_tasks.
+    assert(id < _max_num_tasks, "Task id %u not within bounds up to %u", id, _max_num_tasks);
     return _tasks[id];
   }
 
@@ -446,7 +449,30 @@
   // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
   // true, periodically insert checks to see if this method should exit prematurely.
   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
+
+  // Region statistics gathered during marking.
+  G1RegionMarkStats* _region_mark_stats;
+  // Top pointer for each region at the start of the rebuild remembered set process
+  // for regions whose remembered sets need to be rebuilt. A NULL for a given region
+  // means that this region will not be scanned during the remembered set rebuilding
+  // phase at all.
+  HeapWord* volatile* _top_at_rebuild_starts;
 public:
+  void add_to_liveness(uint worker_id, oop const obj, size_t size);
+  // Liveness of the given region as determined by concurrent marking, i.e. the amount of
+  // live words between bottom and nTAMS.
+  size_t liveness(uint region)  { return _region_mark_stats[region]._live_words; }
+
+  // Sets the internal top_at_region_start for the given region to current top of the region.
+  inline void update_top_at_rebuild_start(HeapRegion* r);
+  // TARS for the given region during remembered set rebuilding.
+  inline HeapWord* top_at_rebuild_start(uint region) const;
+
+  // Clear statistics gathered during the concurrent cycle for the given region after
+  // it has been reclaimed.
+  void clear_statistics_in_region(uint region_idx);
+  // Notification for eagerly reclaimed regions to clean up.
+  void humongous_object_eagerly_reclaimed(HeapRegion* r);
   // Manipulation of the global mark stack.
   // The push and pop operations are used by tasks for transfers
   // between task-local queues and the global mark stack.
@@ -466,17 +492,9 @@
 
   G1CMRootRegions* root_regions() { return &_root_regions; }
 
-  bool concurrent_marking_in_progress() const {
-    return _concurrent_marking_in_progress;
-  }
-  void set_concurrent_marking_in_progress() {
-    _concurrent_marking_in_progress = true;
-  }
-  void clear_concurrent_marking_in_progress() {
-    _concurrent_marking_in_progress = false;
-  }
-
   void concurrent_cycle_start();
+  // Abandon current marking iteration due to a Full GC.
+  void concurrent_cycle_abort();
   void concurrent_cycle_end();
 
   void update_accum_task_vtime(int i, double vtime) {
@@ -498,7 +516,7 @@
                    G1RegionToSpaceMapper* next_bitmap_storage);
   ~G1ConcurrentMark();
 
-  ConcurrentMarkThread* cm_thread() { return _cm_thread; }
+  G1ConcurrentMarkThread* cm_thread() { return _cm_thread; }
 
   const G1CMBitMap* const prev_mark_bitmap() const { return _prev_mark_bitmap; }
   G1CMBitMap* next_mark_bitmap() const { return _next_mark_bitmap; }
@@ -506,6 +524,8 @@
   // Calculates the number of concurrent GC threads to be used in the marking phase.
   uint calc_active_marking_workers();
 
+  // Moves all per-task cached data into global state.
+  void flush_all_task_caches();
   // Prepare internal data structures for the next mark cycle. This includes clearing
   // the next mark bitmap and some internal data structures. This method is intended
   // to be called concurrently to the mutator. It will yield to safepoint requests.
@@ -518,31 +538,24 @@
   // only. Will not yield to pause requests.
   bool next_mark_bitmap_is_clear();
 
-  // These two do the work that needs to be done before and after the
-  // initial root checkpoint. Since this checkpoint can be done at two
-  // different points (i.e. an explicit pause or piggy-backed on a
-  // young collection), then it's nice to be able to easily share the
-  // pre/post code. It might be the case that we can put everything in
-  // the post method.
-  void checkpoint_roots_initial_pre();
-  void checkpoint_roots_initial_post();
+  // These two methods do the work that needs to be done at the start and end of the
+  // initial mark pause.
+  void pre_initial_mark();
+  void post_initial_mark();
 
   // Scan all the root regions and mark everything reachable from
   // them.
   void scan_root_regions();
 
   // Scan a single root region and mark everything reachable from it.
-  void scan_root_region(HeapRegion* hr);
+  void scan_root_region(HeapRegion* hr, uint worker_id);
 
   // Do concurrent phase of marking, to a tentative transitive closure.
   void mark_from_roots();
 
-  void checkpoint_roots_final(bool clear_all_soft_refs);
-  void checkpoint_roots_final_work();
+  void remark();
 
   void cleanup();
-  void complete_cleanup();
-
   // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
   // this carefully.
   inline void mark_in_prev_bitmap(oop p);
@@ -554,16 +567,13 @@
 
   inline bool is_marked_in_prev_bitmap(oop p) const;
 
-  // Verify that there are no CSet oops on the stacks (taskqueues /
+  // Verify that there are no collection set oops on the stacks (taskqueues /
   // global mark stack) and fingers (global / per-task).
   // If marking is not in progress, it's a no-op.
   void verify_no_cset_oops() PRODUCT_RETURN;
 
   inline bool do_yield_check();
 
-  // Abandon current marking iteration due to a Full GC.
-  void abort();
-
   bool has_aborted()      { return _has_aborted; }
 
   void print_summary_info();
@@ -574,8 +584,10 @@
   void print_on_error(outputStream* st) const;
 
   // Mark the given object on the next bitmap if it is below nTAMS.
-  inline bool mark_in_next_bitmap(HeapRegion* const hr, oop const obj);
-  inline bool mark_in_next_bitmap(oop const obj);
+  // If the passed obj_size is zero, it is recalculated from the given object if
+  // needed. This is to be as lazy as possible with accessing the object's size.
+  inline bool mark_in_next_bitmap(uint worker_id, HeapRegion* const hr, oop const obj, size_t const obj_size = 0);
+  inline bool mark_in_next_bitmap(uint worker_id, oop const obj, size_t const obj_size = 0);
 
   // Returns true if initialization was successfully completed.
   bool completed_initialization() const {
@@ -586,21 +598,8 @@
   G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
 
 private:
-  // Clear (Reset) all liveness count data.
-  void clear_live_data(WorkGang* workers);
-
-#ifdef ASSERT
-  // Verify all of the above data structures that they are in initial state.
-  void verify_live_data_clear();
-#endif
-
-  // Aggregates the per-card liveness data based on the current marking. Also sets
-  // the amount of marked bytes for each region.
-  void create_live_data();
-
-  void finalize_live_data();
-
-  void verify_live_data();
+  // Rebuilds the remembered sets for chosen regions in parallel and concurrently to the application.
+  void rebuild_rem_set_concurrently();
 };
 
 // A class representing a marking task.
@@ -617,6 +616,10 @@
     init_hash_seed                = 17
   };
 
+  // Number of entries in the per-task region mark stats cache. This seems enough to have a very
+  // low cache miss rate.
+  static const uint RegionMarkStatsCacheSize = 1024;
+
   G1CMObjArrayProcessor       _objArray_processor;
 
   uint                        _worker_id;
@@ -626,6 +629,7 @@
   // the task queue of this task
   G1CMTaskQueue*              _task_queue;
 
+  G1RegionMarkStatsCache      _mark_stats_cache;
   // Number of calls to this task
   uint                        _calls;
 
@@ -686,12 +690,6 @@
   // When this task got into the termination protocol
   double                      _termination_start_time_ms;
 
-  // True when the task is during a concurrent phase, false when it is
-  // in the remark phase (so, in the latter case, we do not have to
-  // check all the things that we have to check during the concurrent
-  // phase, i.e. SATB buffer availability...)
-  bool                        _concurrent;
-
   TruncatedSeq                _marking_step_diffs_ms;
 
   // Updates the local fields after this task has claimed
@@ -735,8 +733,6 @@
   // Clears all the fields that correspond to a claimed region.
   void clear_region_fields();
 
-  void set_concurrent(bool concurrent) { _concurrent = concurrent; }
-
   // The main method of this class which performs a marking step
   // trying not to exceed the given duration. However, it might exit
   // prematurely, according to some conditions (i.e. SATB buffers are
@@ -784,7 +780,8 @@
   // Grey the object (by calling make_reference_grey) if required,
   // e.g. obj is below its containing region's NTAMS.
   // Precondition: obj is a valid heap object.
-  inline void deal_with_reference(oop obj);
+  template <class T>
+  inline void deal_with_reference(T* p);
 
   // Scans an object and visits its children.
   inline void scan_task_entry(G1TaskQueueEntry task_entry);
@@ -818,8 +815,17 @@
 
   G1CMTask(uint worker_id,
            G1ConcurrentMark *cm,
-           G1CMTaskQueue* task_queue);
+           G1CMTaskQueue* task_queue,
+           G1RegionMarkStats* mark_stats,
+           uint max_regions);
+
+  inline void update_liveness(oop const obj, size_t const obj_size);
 
+  // Clear (without flushing) the mark cache entry for the given region.
+  void clear_mark_stats_cache(uint region_idx);
+  // Evict the whole statistics cache into the global statistics. Returns the
+  // number of cache hits and misses so far.
+  Pair<size_t, size_t> flush_mark_stats_cache();
   // Prints statistics associated with this task
   void print_stats();
 };
@@ -827,7 +833,7 @@
 // Class that's used to print out per-region liveness
 // information. It's currently used at the end of marking and also
 // after we sort the old regions at the end of the cleanup operation.
-class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
+class G1PrintRegionLivenessInfoClosure : public HeapRegionClosure {
 private:
   // Accumulators for these values.
   size_t _total_used_bytes;
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,16 +29,21 @@
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
+#include "gc/g1/g1RemSetTrackingPolicy.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/g1/heapRegion.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 
-inline bool G1ConcurrentMark::mark_in_next_bitmap(oop const obj) {
+inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj, size_t const obj_size) {
   HeapRegion* const hr = _g1h->heap_region_containing(obj);
-  return mark_in_next_bitmap(hr, obj);
+  return mark_in_next_bitmap(worker_id, hr, obj, obj_size);
 }
 
-inline bool G1ConcurrentMark::mark_in_next_bitmap(HeapRegion* const hr, oop const obj) {
+inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, HeapRegion* const hr, oop const obj, size_t const obj_size) {
   assert(hr != NULL, "just checking");
   assert(hr->is_in_reserved(obj), "Attempting to mark object at " PTR_FORMAT " that is not contained in the given region %u", p2i(obj), hr->hrm_index());
 
@@ -52,7 +57,11 @@
 
   HeapWord* const obj_addr = (HeapWord*)obj;
 
-  return _next_mark_bitmap->par_mark(obj_addr);
+  bool success = _next_mark_bitmap->par_mark(obj_addr);
+  if (success) {
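+    // par_mark() returns true only for the thread that actually set the bit, so each object's size is added to the liveness statistics exactly once.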
+    add_to_liveness(worker_id, obj, obj_size == 0 ? obj->size() : obj_size);
+  }
+  return success;
 }
 
 #ifndef PRODUCT
@@ -157,8 +166,35 @@
   return mr.word_size();
 }
 
+inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(uint region) const {
+  assert(region < _g1h->max_regions(), "Tried to access TARS for region %u out of bounds", region);
+  return _top_at_rebuild_starts[region];
+}
+
+inline void G1ConcurrentMark::update_top_at_rebuild_start(HeapRegion* r) {
+  uint const region = r->hrm_index();
+  assert(region < _g1h->max_regions(), "Tried to access TARS for region %u out of bounds", region);
+  assert(_top_at_rebuild_starts[region] == NULL,
+         "TARS for region %u has already been set to " PTR_FORMAT " should be NULL",
+         region, p2i(_top_at_rebuild_starts[region]));
+  G1RemSetTrackingPolicy* tracker = _g1h->g1_policy()->remset_tracker();
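+  // Record top only for regions the tracking policy wants scanned; the rebuild process only visits objects below this saved top.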
+  if (tracker->needs_scan_for_rebuild(r)) {
+    _top_at_rebuild_starts[region] = r->top();
+  } else {
+    // Leave TARS at NULL.
+  }
+}
+
+inline void G1CMTask::update_liveness(oop const obj, const size_t obj_size) {
+  _mark_stats_cache.add_live_words(_g1h->addr_to_region((HeapWord*)obj), obj_size);
+}
+
+inline void G1ConcurrentMark::add_to_liveness(uint worker_id, oop const obj, size_t size) {
+  task(worker_id)->update_liveness(obj, size);
+}
+
 inline void G1CMTask::make_reference_grey(oop obj) {
-  if (!_cm->mark_in_next_bitmap(obj)) {
+  if (!_cm->mark_in_next_bitmap(_worker_id, obj)) {
     return;
   }
 
@@ -199,8 +235,10 @@
   }
 }
 
-inline void G1CMTask::deal_with_reference(oop obj) {
+template <class T>
+inline void G1CMTask::deal_with_reference(T* p) {
   increment_refs_reached();
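+  // The reference field may be modified concurrently by mutators, so load it with MO_VOLATILE semantics.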
+  oop const obj = RawAccess<MO_VOLATILE>::oop_load(p);
   if (obj == NULL) {
     return;
   }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -39,7 +39,6 @@
 
 // Closure for iteration over bitmaps
 class G1CMBitMapClosure {
-private:
   G1ConcurrentMark* const _cm;
   G1CMTask* const _task;
 public:
@@ -49,9 +48,8 @@
 };
 
 class G1CMBitMapMappingChangedListener : public G1MappingChangedListener {
- private:
   G1CMBitMap* _bm;
- public:
+public:
   G1CMBitMapMappingChangedListener() : _bm(NULL) {}
 
   void set_bitmap(G1CMBitMap* bm) { _bm = bm; }
@@ -62,7 +60,6 @@
 // A generic mark bitmap for concurrent marking.  This is essentially a wrapper
 // around the BitMap class that is based on HeapWords, with one bit per (1 << _shifter) HeapWords.
 class G1CMBitMap {
-private:
   MemRegion _covered;    // The heap area covered by this bitmap.
 
   const int _shifter;    // Shift amount from heap index to bit index in the bitmap.
@@ -114,9 +111,6 @@
   inline HeapWord* get_next_marked_addr(const HeapWord* addr,
                                         const HeapWord* limit) const;
 
-  // The argument addr should be the start address of a valid object
-  inline HeapWord* addr_after_obj(HeapWord* addr);
-
   void print_on_error(outputStream* st, const char* prefix) const;
 
   // Write marks.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
+#include "gc/g1/g1Analytics.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentMark.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
+#include "gc/g1/g1MMUTracker.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/g1RemSet.hpp"
+#include "gc/g1/vm_operations_g1.hpp"
+#include "gc/shared/concurrentGCPhaseManager.hpp"
+#include "gc/shared/gcId.hpp"
+#include "gc/shared/gcTrace.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
+#include "logging/log.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/debug.hpp"
+
+// ======= Concurrent Mark Thread ========
+
+// Check order in EXPAND_CONCURRENT_PHASES
+STATIC_ASSERT(ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE <
+              ConcurrentGCPhaseManager::IDLE_PHASE);
+
+#define EXPAND_CONCURRENT_PHASES(expander)                                 \
+  expander(ANY, = ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE, NULL)     \
+  expander(IDLE, = ConcurrentGCPhaseManager::IDLE_PHASE, NULL)             \
+  expander(CONCURRENT_CYCLE,, "Concurrent Cycle")                          \
+  expander(CLEAR_CLAIMED_MARKS,, "Concurrent Clear Claimed Marks")         \
+  expander(SCAN_ROOT_REGIONS,, "Concurrent Scan Root Regions")             \
+  expander(CONCURRENT_MARK,, "Concurrent Mark")                            \
+  expander(MARK_FROM_ROOTS,, "Concurrent Mark From Roots")                 \
+  expander(BEFORE_REMARK,, NULL)                                           \
+  expander(REMARK,, NULL)                                                  \
+  expander(REBUILD_REMEMBERED_SETS,, "Concurrent Rebuild Remembered Sets") \
+  expander(CLEANUP_FOR_NEXT_MARK,, "Concurrent Cleanup for Next Mark")     \
+  /* */
+
+class G1ConcurrentPhase : public AllStatic {
+public:
+  enum {
+#define CONCURRENT_PHASE_ENUM(tag, value, ignore_title) tag value,
+    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_ENUM)
+#undef CONCURRENT_PHASE_ENUM
+    PHASE_ID_LIMIT
+  };
+};
+
+// The CM thread is created when the G1 garbage collector is used
+
+G1ConcurrentMarkThread::G1ConcurrentMarkThread(G1ConcurrentMark* cm) :
+  ConcurrentGCThread(),
+  _cm(cm),
+  _state(Idle),
+  _phase_manager_stack(),
+  _vtime_accum(0.0),
+  _vtime_mark_accum(0.0) {
+
+  set_name("G1 Main Marker");
+  create_and_start();
+}
+
+class CMRemark : public VoidClosure {
+  G1ConcurrentMark* _cm;
+public:
+  CMRemark(G1ConcurrentMark* cm) : _cm(cm) {}
+
+  void do_void() {
+    _cm->remark();
+  }
+};
+
+class CMCleanup : public VoidClosure {
+  G1ConcurrentMark* _cm;
+public:
+  CMCleanup(G1ConcurrentMark* cm) : _cm(cm) {}
+
+  void do_void() {
+    _cm->cleanup();
+  }
+};
+
+double G1ConcurrentMarkThread::mmu_sleep_time(G1Policy* g1_policy, bool remark) {
+  // There are 3 reasons to use SuspendibleThreadSetJoiner.
+  // 1. To avoid concurrency problems.
+  //    - G1MMUTracker::add_pause(), when_sec() and its variants (when_ms() etc.) can be called
+  //      concurrently from the G1ConcurrentMarkThread and the VMThread.
+  // 2. If a GC is currently running but has not yet updated the MMU,
+  //    this ensures that pause is still considered in the MMU calculation.
+  // 3. If a GC is currently running, the G1ConcurrentMarkThread will wait for it to finish,
+  //    and then sleep for the amount of time predicted by delay_to_keep_mmu().
+  SuspendibleThreadSetJoiner sts_join;
+
+  const G1Analytics* analytics = g1_policy->analytics();
+  double now = os::elapsedTime();
+  double prediction_ms = remark ? analytics->predict_remark_time_ms()
+                                : analytics->predict_cleanup_time_ms();
+  G1MMUTracker* mmu_tracker = g1_policy->mmu_tracker();
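+  // when_ms() returns how long to wait from 'now' so that a pause of the predicted length still keeps the MMU goal.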
+  return mmu_tracker->when_ms(now, prediction_ms);
+}
+
+void G1ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
+  if (g1_policy->adaptive_young_list_length()) {
+    jlong sleep_time_ms = mmu_sleep_time(g1_policy, remark);
+    if (!_cm->has_aborted() && sleep_time_ms > 0) {
+      os::sleep(this, sleep_time_ms, false);
+    }
+  }
+}
+
+class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
+  G1ConcurrentMark* _cm;
+
+ public:
+  G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
+    GCTraceConcTimeImpl<LogLevel::Info,  LogTag::_gc, LogTag::_marking>(title),
+    _cm(cm)
+  {
+    _cm->gc_timer_cm()->register_gc_concurrent_start(title);
+  }
+
+  ~G1ConcPhaseTimer() {
+    _cm->gc_timer_cm()->register_gc_concurrent_end();
+  }
+};
+
+static const char* const concurrent_phase_names[] = {
+#define CONCURRENT_PHASE_NAME(tag, ignore_value, ignore_title) XSTR(tag),
+  EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_NAME)
+#undef CONCURRENT_PHASE_NAME
+  NULL                          // terminator
+};
+// Verify dense enum assumption.  +1 for terminator.
+STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT + 1 ==
+              ARRAY_SIZE(concurrent_phase_names));
+
+// Returns the phase number for name, or a negative value if unknown.
+static int lookup_concurrent_phase(const char* name) {
+  const char* const* names = concurrent_phase_names;
+  for (uint i = 0; names[i] != NULL; ++i) {
+    if (strcmp(name, names[i]) == 0) {
+      return static_cast<int>(i);
+    }
+  }
+  return -1;
+}
+
+// The phase must be valid and must have a title.
+static const char* lookup_concurrent_phase_title(int phase) {
+  static const char* const titles[] = {
+#define CONCURRENT_PHASE_TITLE(ignore_tag, ignore_value, title) title,
+    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_TITLE)
+#undef CONCURRENT_PHASE_TITLE
+  };
+  // Verify dense enum assumption.
+  STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT == ARRAY_SIZE(titles));
+
+  assert(0 <= phase, "precondition");
+  assert((uint)phase < ARRAY_SIZE(titles), "precondition");
+  const char* title = titles[phase];
+  assert(title != NULL, "precondition");
+  return title;
+}
+
+class G1ConcPhaseManager : public StackObj {
+  G1ConcurrentMark* _cm;
+  ConcurrentGCPhaseManager _manager;
+
+public:
+  G1ConcPhaseManager(int phase, G1ConcurrentMarkThread* thread) :
+    _cm(thread->cm()),
+    _manager(phase, thread->phase_manager_stack())
+  { }
+
+  ~G1ConcPhaseManager() {
+    // Deactivate the manager if marking aborted, to avoid blocking on
+    // phase exit when the phase has been requested.
+    if (_cm->has_aborted()) {
+      _manager.deactivate();
+    }
+  }
+
+  void set_phase(int phase, bool force) {
+    _manager.set_phase(phase, force);
+  }
+};
+
+// Combine phase management and timing into one convenient utility.
+class G1ConcPhase : public StackObj {
+  G1ConcPhaseTimer _timer;
+  G1ConcPhaseManager _manager;
+
+public:
+  G1ConcPhase(int phase, G1ConcurrentMarkThread* thread) :
+    _timer(thread->cm(), lookup_concurrent_phase_title(phase)),
+    _manager(phase, thread)
+  { }
+};
+
+const char* const* G1ConcurrentMarkThread::concurrent_phases() const {
+  return concurrent_phase_names;
+}
+
+bool G1ConcurrentMarkThread::request_concurrent_phase(const char* phase_name) {
+  int phase = lookup_concurrent_phase(phase_name);
+  if (phase < 0) return false;
+
+  while (!ConcurrentGCPhaseManager::wait_for_phase(phase,
+                                                   phase_manager_stack())) {
+    assert(phase != G1ConcurrentPhase::ANY, "Wait for ANY phase must succeed");
+    if ((phase != G1ConcurrentPhase::IDLE) && !during_cycle()) {
+      // If idle and the goal is !idle, start a collection.
+      G1CollectedHeap::heap()->collect(GCCause::_wb_conc_mark);
+    }
+  }
+  return true;
+}
+
+void G1ConcurrentMarkThread::run_service() {
+  _vtime_start = os::elapsedVTime();
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1Policy* g1_policy = g1h->g1_policy();
+
+  G1ConcPhaseManager cpmanager(G1ConcurrentPhase::IDLE, this);
+
+  while (!should_terminate()) {
+    // wait until started is set.
+    sleep_before_next_cycle();
+    if (should_terminate()) {
+      break;
+    }
+
+    cpmanager.set_phase(G1ConcurrentPhase::CONCURRENT_CYCLE, false /* force */);
+
+    GCIdMark gc_id_mark;
+
+    _cm->concurrent_cycle_start();
+
+    GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
+    {
+      ResourceMark rm;
+      HandleMark   hm;
+      double cycle_start = os::elapsedVTime();
+
+      {
+        G1ConcPhase p(G1ConcurrentPhase::CLEAR_CLAIMED_MARKS, this);
+        ClassLoaderDataGraph::clear_claimed_marks();
+      }
+
+      // We have to ensure that we finish scanning the root regions
+      // before the next GC takes place. To ensure this we have to
+      // make sure that we do not join the STS until the root regions
+      // have been scanned. If we did, then it would be possible for a
+      // subsequent GC to block us from joining the STS and to proceed
+      // without the root regions having been scanned, which would be a
+      // correctness issue.
+
+      {
+        G1ConcPhase p(G1ConcurrentPhase::SCAN_ROOT_REGIONS, this);
+        _cm->scan_root_regions();
+      }
+
+      // It would be nice to use the G1ConcPhase class here but
+      // the "end" logging is inside the loop and not at the end of
+      // a scope. Also, the timer doesn't support nesting.
+      // Mimicking the same log output instead.
+      {
+        G1ConcPhaseManager mark_manager(G1ConcurrentPhase::CONCURRENT_MARK, this);
+        jlong mark_start = os::elapsed_counter();
+        const char* cm_title = lookup_concurrent_phase_title(G1ConcurrentPhase::CONCURRENT_MARK);
+        log_info(gc, marking)("%s (%.3fs)",
+                              cm_title,
+                              TimeHelper::counter_to_seconds(mark_start));
+        for (uint iter = 1; !_cm->has_aborted(); ++iter) {
+          // Concurrent marking.
+          {
+            G1ConcPhase p(G1ConcurrentPhase::MARK_FROM_ROOTS, this);
+            _cm->mark_from_roots();
+          }
+          if (_cm->has_aborted()) {
+            break;
+          }
+
+          // Provide a control point after mark_from_roots.
+          {
+            G1ConcPhaseManager p(G1ConcurrentPhase::BEFORE_REMARK, this);
+          }
+          if (_cm->has_aborted()) {
+            break;
+          }
+
+          // Delay remark pause for MMU.
+          double mark_end_time = os::elapsedVTime();
+          jlong mark_end = os::elapsed_counter();
+          _vtime_mark_accum += (mark_end_time - cycle_start);
+          delay_to_keep_mmu(g1_policy, true /* remark */);
+          if (_cm->has_aborted()) {
+            break;
+          }
+
+          // Pause Remark.
+          log_info(gc, marking)("%s (%.3fs, %.3fs) %.3fms",
+                                cm_title,
+                                TimeHelper::counter_to_seconds(mark_start),
+                                TimeHelper::counter_to_seconds(mark_end),
+                                TimeHelper::counter_to_millis(mark_end - mark_start));
+          mark_manager.set_phase(G1ConcurrentPhase::REMARK, false);
+          CMRemark cl(_cm);
+          VM_CGC_Operation op(&cl, "Pause Remark");
+          VMThread::execute(&op);
+          if (_cm->has_aborted()) {
+            break;
+          } else if (!_cm->restart_for_overflow()) {
+            break;              // Exit loop if no restart requested.
+          } else {
+            // Loop to restart for overflow.
+            mark_manager.set_phase(G1ConcurrentPhase::CONCURRENT_MARK, false);
+            log_info(gc, marking)("%s Restart for Mark Stack Overflow (iteration #%u)",
+                                  cm_title, iter);
+          }
+        }
+      }
+
+      if (!_cm->has_aborted()) {
+        G1ConcPhase p(G1ConcurrentPhase::REBUILD_REMEMBERED_SETS, this);
+        _cm->rebuild_rem_set_concurrently();
+      }
+
+      double end_time = os::elapsedVTime();
+      // Update the total virtual time before doing this, since it will try
+      // to measure it to get the vtime for this marking.
+      _vtime_accum = (end_time - _vtime_start);
+
+      if (!_cm->has_aborted()) {
+        delay_to_keep_mmu(g1_policy, false /* remark */);
+      }
+
+      if (!_cm->has_aborted()) {
+        CMCleanup cl_cl(_cm);
+        VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
+        VMThread::execute(&op);
+      }
+
+      // We now want to allow clearing of the marking bitmap to be
+      // suspended by a collection pause.
+      // We may have aborted just before the remark. Do not bother clearing the
+      // bitmap then, as it has been done during mark abort.
+      if (!_cm->has_aborted()) {
+        G1ConcPhase p(G1ConcurrentPhase::CLEANUP_FOR_NEXT_MARK, this);
+        _cm->cleanup_for_next_mark();
+      } else {
+        assert(!G1VerifyBitmaps || _cm->next_mark_bitmap_is_clear(), "Next mark bitmap must be clear");
+      }
+    }
+
+    // Update the number of full collections that have been
+    // completed. This will also notify the FullGCCount_lock in case a
+    // Java thread is waiting for a full GC to happen (e.g., it
+    // called System.gc() with +ExplicitGCInvokesConcurrent).
+    {
+      SuspendibleThreadSetJoiner sts_join;
+      g1h->increment_old_marking_cycles_completed(true /* concurrent */);
+
+      _cm->concurrent_cycle_end();
+    }
+
+    cpmanager.set_phase(G1ConcurrentPhase::IDLE, _cm->has_aborted() /* force */);
+  }
+  _cm->root_regions()->cancel_scan();
+}
+
+void G1ConcurrentMarkThread::stop_service() {
+  MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
+  CGC_lock->notify_all();
+}
+
+void G1ConcurrentMarkThread::sleep_before_next_cycle() {
+  // We wait on CGC_lock because we don't want to sample the started()
+  // flag below while the world is otherwise stopped.
+  assert(!in_progress(), "should have been cleared");
+
+  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
+  while (!started() && !should_terminate()) {
+    CGC_lock->wait(Mutex::_no_safepoint_check_flag);
+  }
+
+  if (started()) {
+    set_in_progress();
+  }
+}
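
A note on the MMU arithmetic above: mmu_sleep_time() asks the G1MMUTracker how long the marking thread must sleep so that the upcoming remark or cleanup pause, of predicted duration, still meets the mutator-utilization goal. A minimal standalone sketch of that computation, assuming a single recorded pause and illustrative names (the real tracker keeps a queue of recent pauses):

// mmu_sketch.cpp -- minimal sketch, assuming a single recorded pause;
// the real G1MMUTracker keeps a queue of recent pauses.
#include <algorithm>
#include <cstdio>

struct MMUSketch {
  double time_slice;     // seconds, cf. GCPauseIntervalMillis / 1000.0
  double max_gc_time;    // GC seconds allowed per slice, cf. MaxGCPauseMillis / 1000.0
  double last_pause_end; // end of the most recent pause, in seconds

  // Milliseconds to wait from 'now' before a pause predicted to take
  // 'prediction_ms' may start without violating the MMU goal.
  double when_ms(double now, double prediction_ms) const {
    double pred = prediction_ms / 1000.0;
    double budget = max_gc_time - pred; // GC time still allowed in the slice
    if (budget < 0.0) {
      return 0.0; // the prediction alone busts the goal; delaying cannot help
    }
    // The binding window is the time slice ending when the new pause ends;
    // wait until enough of the previous pause has slid out of it.
    double earliest_start = last_pause_end - budget + time_slice - pred;
    return std::max(0.0, earliest_start - now) * 1000.0;
  }
};

int main() {
  MMUSketch mmu{1.0, 0.2, 10.15}; // 20% GC time per 1s slice; last pause ended at 10.15s
  std::printf("sleep for %.0f ms\n", mmu.when_ms(10.2, 100.0)); // prints: sleep for 750 ms
  return 0;
}
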
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_HPP
+#define SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_HPP
+
+#include "gc/shared/concurrentGCPhaseManager.hpp"
+#include "gc/shared/concurrentGCThread.hpp"
+
+class G1ConcurrentMark;
+class G1Policy;
+
+// The concurrent mark thread triggers the various steps of the concurrent marking
+// cycle, including the associated cleanup work.
+class G1ConcurrentMarkThread: public ConcurrentGCThread {
+  friend class VMStructs;
+
+  double _vtime_start;  // Initial virtual time.
+  double _vtime_accum;  // Accumulated virtual time.
+  double _vtime_mark_accum;
+
+  G1ConcurrentMark* _cm;
+
+  enum State {
+    Idle,
+    Started,
+    InProgress
+  };
+
+  volatile State _state;
+
+  // WhiteBox testing support.
+  ConcurrentGCPhaseManager::Stack _phase_manager_stack;
+
+  void sleep_before_next_cycle();
+  // Delay marking to meet MMU.
+  void delay_to_keep_mmu(G1Policy* g1_policy, bool remark);
+  double mmu_sleep_time(G1Policy* g1_policy, bool remark);
+
+  void run_service();
+  void stop_service();
+
+ public:
+  // Constructor
+  G1ConcurrentMarkThread(G1ConcurrentMark* cm);
+
+  // Total virtual time so far for this thread and concurrent marking tasks.
+  double vtime_accum();
+  // Marking virtual time so far for this thread and concurrent marking tasks.
+  double vtime_mark_accum();
+
+  G1ConcurrentMark* cm()   { return _cm; }
+
+  void set_idle()          { assert(_state != Started, "must not be starting a new cycle"); _state = Idle; }
+  bool idle()              { return _state == Idle; }
+  void set_started()       { assert(_state == Idle, "cycle in progress"); _state = Started; }
+  bool started()           { return _state == Started; }
+  void set_in_progress()   { assert(_state == Started, "must be starting a cycle"); _state = InProgress; }
+  bool in_progress()       { return _state == InProgress; }
+
+  // Returns true from the moment a marking cycle is
+  // initiated (during the initial-mark pause when started() is set)
+  // to the moment when the cycle completes (just after the next
+  // marking bitmap has been cleared and in_progress() is
+  // cleared). While during_cycle() is true we will not start another cycle
+  // so that cycles do not overlap. We cannot use just in_progress()
+  // as the CM thread might take some time to wake up before noticing
+  // that started() is set and setting in_progress().
+  bool during_cycle()      { return !idle(); }
+
+  // WhiteBox testing support.
+  const char* const* concurrent_phases() const;
+  bool request_concurrent_phase(const char* phase);
+
+  ConcurrentGCPhaseManager::Stack* phase_manager_stack() {
+    return &_phase_manager_stack;
+  }
+};
+
+#endif // SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_HPP
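
The Idle/Started/InProgress protocol above is subtle (during_cycle() deliberately tests !idle() rather than in_progress()), so here is a tiny standalone model of the allowed transitions; the names mirror the header, while the driver in main() is hypothetical:

#include <cassert>

enum class State { Idle, Started, InProgress };

struct CycleState {
  State s = State::Idle;
  void set_started()     { assert(s == State::Idle); s = State::Started; }
  void set_in_progress() { assert(s == State::Started); s = State::InProgress; }
  void set_idle()        { assert(s != State::Started); s = State::Idle; }
  bool during_cycle() const { return s != State::Idle; }
};

int main() {
  CycleState cs;
  cs.set_started();          // initial-mark pause signals a new cycle
  assert(cs.during_cycle()); // already true, even before the CM thread wakes up
  cs.set_in_progress();      // the CM thread noticed started() and took over
  cs.set_idle();             // cycle complete
  assert(!cs.during_cycle());
  return 0;
}
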
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_INLINE_HPP
+#define SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_INLINE_HPP
+
+#include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.hpp"
+
+// Total virtual time so far.
+inline double G1ConcurrentMarkThread::vtime_accum() {
+  return _vtime_accum + _cm->all_task_accum_vtime();
+}
+
+// Marking virtual time so far.
+inline double G1ConcurrentMarkThread::vtime_mark_accum() {
+  return _vtime_mark_accum + _cm->all_task_accum_vtime();
+}
+
+#endif // SHARE_VM_GC_G1_G1CONCURRENTMARKTHREAD_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -34,6 +34,8 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 class UpdateRSetDeferred : public ExtendedOopClosure {
 private:
@@ -51,12 +53,12 @@
     assert(_g1->heap_region_containing(p)->is_in_reserved(p), "paranoia");
     assert(!_g1->heap_region_containing(p)->is_survivor(), "Unexpected evac failure in survivor region");
 
-    T const o = oopDesc::load_heap_oop(p);
-    if (oopDesc::is_null(o)) {
+    T const o = RawAccess<>::oop_load(p);
+    if (CompressedOops::is_null(o)) {
       return;
     }
 
-    if (HeapRegion::is_in_same_region(p, oopDesc::decode_heap_oop(o))) {
+    if (HeapRegion::is_in_same_region(p, CompressedOops::decode(o))) {
       return;
     }
     size_t card_index = _ct->index_for(p);
@@ -124,7 +126,7 @@
         // explicitly and all objects in the CSet are considered
         // (implicitly) live. So, we won't mark them explicitly and
         // we'll leave them over NTAMS.
-        _cm->mark_in_next_bitmap(_hr, obj);
+        _cm->mark_in_next_bitmap(_worker_id, obj);
       }
       size_t obj_size = obj->size();
 
@@ -226,8 +228,8 @@
 
     if (_hrclaimer->claim_region(hr->hrm_index())) {
       if (hr->evacuation_failed()) {
-        bool during_initial_mark = _g1h->collector_state()->during_initial_mark_pause();
-        bool during_conc_mark = _g1h->collector_state()->mark_in_progress();
+        bool during_initial_mark = _g1h->collector_state()->in_initial_mark_gc();
+        bool during_conc_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();
 
         hr->note_self_forwarding_removal_start(during_initial_mark,
                                                during_conc_mark);
@@ -238,6 +240,7 @@
         size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_initial_mark);
 
         hr->rem_set()->clean_strong_code_roots(hr);
+        hr->rem_set()->clear_locked(true);
 
         hr->note_self_forwarding_removal_end(live_bytes);
       }
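
The repeated oopDesc::load_heap_oop / is_null / decode_heap_oop replacements in this and the following files move to the RawAccess / CompressedOops API. The compressed-oop arithmetic behind decode, sketched standalone with an assumed base and shift (HotSpot derives both from the actual heap layout):

#include <cassert>
#include <cstdint>

using narrowOop = uint32_t;

static const uintptr_t heap_base = 0x80000000u; // assumption for the sketch
static const int       shift     = 3;           // 8-byte minimum object alignment

static bool is_null(narrowOop v) { return v == 0; }

static void* decode_not_null(narrowOop v) {
  assert(!is_null(v));
  return reinterpret_cast<void*>(heap_base + (uintptr_t(v) << shift));
}

static narrowOop encode(void* p) {
  if (p == nullptr) return 0;
  return narrowOop((reinterpret_cast<uintptr_t>(p) - heap_base) >> shift);
}

int main() {
  void* p = reinterpret_cast<void*>(heap_base + 64);
  narrowOop n = encode(p);
  assert(!is_null(n) && decode_not_null(n) == p);
  return 0;
}
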
--- a/src/hotspot/share/gc/g1/g1FromCardCache.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1FromCardCache.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,9 +28,9 @@
 #include "memory/padded.inline.hpp"
 #include "utilities/debug.hpp"
 
-int**  G1FromCardCache::_cache = NULL;
-uint   G1FromCardCache::_max_regions = 0;
-size_t G1FromCardCache::_static_mem_size = 0;
+uintptr_t** G1FromCardCache::_cache = NULL;
+uint        G1FromCardCache::_max_regions = 0;
+size_t      G1FromCardCache::_static_mem_size = 0;
 #ifdef ASSERT
 uint   G1FromCardCache::_max_workers = 0;
 #endif
@@ -43,9 +43,9 @@
 #ifdef ASSERT
   _max_workers = num_par_rem_sets;
 #endif
-  _cache = Padded2DArray<int, mtGC>::create_unfreeable(_max_regions,
-                                                       num_par_rem_sets,
-                                                       &_static_mem_size);
+  _cache = Padded2DArray<uintptr_t, mtGC>::create_unfreeable(_max_regions,
+                                                             num_par_rem_sets,
+                                                             &_static_mem_size);
 
   invalidate(0, _max_regions);
 }
@@ -68,7 +68,7 @@
 void G1FromCardCache::print(outputStream* out) {
   for (uint i = 0; i < G1RemSet::num_par_rem_sets(); i++) {
     for (uint j = 0; j < _max_regions; j++) {
-      out->print_cr("_from_card_cache[%u][%u] = %d.",
+      out->print_cr("_from_card_cache[%u][%u] = " SIZE_FORMAT ".",
                     i, j, at(i, j));
     }
   }
--- a/src/hotspot/share/gc/g1/g1FromCardCache.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1FromCardCache.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -37,7 +37,7 @@
   // This order minimizes the time to clear all entries for a given region during region
   // freeing. I.e. a single clear of a single memory area instead of multiple separate
   // accesses with a large stride per region.
-  static int** _cache;
+  static uintptr_t** _cache;
   static uint _max_regions;
   static size_t _static_mem_size;
 #ifdef ASSERT
@@ -50,16 +50,14 @@
 #endif
 
  public:
-  enum {
-    InvalidCard = -1 // Card value of an invalid card, i.e. a card index not otherwise used.
-  };
+  static const uintptr_t InvalidCard = UINTPTR_MAX;
 
   static void clear(uint region_idx);
 
   // Returns true if the given card is in the cache at the given location, or
   // replaces the card at that location and returns false.
-  static bool contains_or_replace(uint worker_id, uint region_idx, int card) {
-    int card_in_cache = at(worker_id, region_idx);
+  static bool contains_or_replace(uint worker_id, uint region_idx, uintptr_t card) {
+    uintptr_t card_in_cache = at(worker_id, region_idx);
     if (card_in_cache == card) {
       return true;
     } else {
@@ -68,12 +66,12 @@
     }
   }
 
-  static int at(uint worker_id, uint region_idx) {
+  static uintptr_t at(uint worker_id, uint region_idx) {
     DEBUG_ONLY(check_bounds(worker_id, region_idx);)
     return _cache[region_idx][worker_id];
   }
 
-  static void set(uint worker_id, uint region_idx, int val) {
+  static void set(uint worker_id, uint region_idx, uintptr_t val) {
     DEBUG_ONLY(check_bounds(worker_id, region_idx);)
     _cache[region_idx][worker_id] = val;
   }
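
The widening above lets card indices exceed 32 bits, with UINTPTR_MAX taking over from -1 as the invalid sentinel. A minimal standalone model of the contains_or_replace() fast path that refinement uses to filter duplicate cards:

#include <cstdint>
#include <vector>

class FromCardCacheSketch {
  static const uintptr_t InvalidCard = UINTPTR_MAX;
  std::vector<std::vector<uintptr_t>> _cache; // indexed [region][worker]
public:
  FromCardCacheSketch(unsigned regions, unsigned workers)
    : _cache(regions, std::vector<uintptr_t>(workers, uintptr_t(InvalidCard))) {}

  // True if 'card' is already cached for this slot; otherwise replace it
  // and report false so the caller processes the card.
  bool contains_or_replace(unsigned worker, unsigned region, uintptr_t card) {
    uintptr_t& slot = _cache[region][worker];
    if (slot == card) {
      return true;
    }
    slot = card;
    return false;
  }
};

int main() {
  FromCardCacheSketch cache(2 /* regions */, 4 /* workers */);
  bool first  = cache.contains_or_replace(0, 1, 42); // false: newly cached
  bool second = cache.contains_or_replace(0, 1, 42); // true: duplicate filtered
  return (!first && second) ? 0 : 1;
}
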
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -213,7 +213,7 @@
 
 void G1FullCollector::phase3_adjust_pointers() {
   // Adjust the pointers to reflect the new locations
-  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers and remembered sets", scope()->timer());
+  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());
 
   G1FullGCAdjustTask task(this);
   run_task(&task);
--- a/src/hotspot/share/gc/g1/g1FullCollector.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,13 +37,12 @@
 #include "utilities/ticks.inline.hpp"
 
 class G1AdjustLiveClosure : public StackObj {
-  G1AdjustAndRebuildClosure* _adjust_closure;
+  G1AdjustClosure* _adjust_closure;
 public:
-  G1AdjustLiveClosure(G1AdjustAndRebuildClosure* cl) :
+  G1AdjustLiveClosure(G1AdjustClosure* cl) :
     _adjust_closure(cl) { }
 
   size_t apply(oop object) {
-    _adjust_closure->update_compaction_delta(object);
     return object->oop_iterate_size(_adjust_closure);
   }
 };
@@ -57,10 +56,9 @@
     _worker_id(worker_id) { }
 
   bool do_heap_region(HeapRegion* r) {
-    G1AdjustAndRebuildClosure cl(_worker_id);
+    G1AdjustClosure cl;
     if (r->is_humongous()) {
       oop obj = oop(r->humongous_start_region()->bottom());
-      cl.update_compaction_delta(obj);
       obj->oop_iterate(&cl, MemRegion(r->bottom(), r->top()));
     } else if (r->is_open_archive()) {
       // Only adjust the open archive regions, the closed ones
@@ -79,7 +77,7 @@
 };
 
 G1FullGCAdjustTask::G1FullGCAdjustTask(G1FullCollector* collector) :
-    G1FullGCTask("G1 Adjust and Rebuild", collector),
+    G1FullGCTask("G1 Adjust", collector),
     _root_processor(G1CollectedHeap::heap(), collector->workers()),
     _hrclaimer(collector->workers()),
     _adjust(),
@@ -115,5 +113,5 @@
   // Now adjust pointers region by region
   G1AdjustRegionClosure blk(collector()->mark_bitmap(), worker_id);
   G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&blk, &_hrclaimer, worker_id);
-  log_task("Adjust and Rebuild task", worker_id, start);
+  log_task("Adjust task", worker_id, start);
 }
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -31,6 +31,8 @@
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupQueue.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "utilities/debug.hpp"
 
 inline bool G1FullGCMarker::mark_object(oop obj) {
@@ -60,9 +62,9 @@
 }
 
 template <class T> inline void G1FullGCMarker::mark_and_push(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if (mark_object(obj)) {
       _oop_stack.push(obj);
       assert(_bitmap->is_marked(obj), "Must be marked now - map self");
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,8 @@
 #include "gc/g1/g1FullGCOopClosures.inline.hpp"
 #include "gc/g1/g1_specialized_oop_closures.hpp"
 #include "logging/logStream.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 void G1MarkAndPushClosure::do_oop(oop* p) {
   do_oop_nv(p);
@@ -49,32 +51,6 @@
   do_cld_nv(cld);
 }
 
-G1AdjustAndRebuildClosure::G1AdjustAndRebuildClosure(uint worker_id) :
-  _worker_id(worker_id),
-  _compaction_delta(0),
-  _g1h(G1CollectedHeap::heap()) { }
-
-void G1AdjustAndRebuildClosure::update_compaction_delta(oop obj) {
-  if (G1ArchiveAllocator::is_open_archive_object(obj)) {
-    _compaction_delta = 0;
-    return;
-  }
-  oop forwardee = obj->forwardee();
-  if (forwardee == NULL) {
-    // Object not moved.
-    _compaction_delta = 0;
-  } else {
-    // Object moved to forwardee, calculate delta.
-    _compaction_delta = calculate_compaction_delta(obj, forwardee);
-  }
-}
-
-void G1AdjustClosure::do_oop(oop* p)       { adjust_pointer(p); }
-void G1AdjustClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
-
-void G1AdjustAndRebuildClosure::do_oop(oop* p)       { do_oop_nv(p); }
-void G1AdjustAndRebuildClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
-
 void G1FollowStackClosure::do_void() { _marker->drain_stack(); }
 
 void G1FullKeepAliveClosure::do_oop(oop* p) { do_oop_work(p); }
@@ -99,10 +75,10 @@
 }
 
 template <class T> void G1VerifyOopClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
     _cc++;
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     bool failed = false;
     if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _verify_option)) {
       MutexLockerEx x(ParGCRareEvent_lock,
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,43 +79,16 @@
   void do_cld_nv(ClassLoaderData* cld);
 };
 
-class G1AdjustClosure : public OopClosure {
+class G1AdjustClosure : public ExtendedOopClosure {
+  template <class T> static inline void adjust_pointer(T* p);
 public:
-  template <class T> static inline oop adjust_pointer(T* p);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-class G1AdjustAndRebuildClosure : public ExtendedOopClosure {
-  uint _worker_id;
-  size_t _compaction_delta;
-  G1CollectedHeap* _g1h;
-
-  inline size_t calculate_compaction_delta(oop current, oop forwardee);
-  template <class T> inline T* add_compaction_delta(T* p);
-
-public:
-  G1AdjustAndRebuildClosure(uint worker_id);
-
-  void update_compaction_delta(oop obj);
-
-  template <class T> inline void add_reference(T* from_field, oop reference, uint worker_id);
-  template <class T> void do_oop_nv(T* p);
+  template <class T> void do_oop_nv(T* p) { adjust_pointer(p); }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
 
   virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
 };
 
-class G1AdjustObjectClosure {
-  G1AdjustAndRebuildClosure* _closure;
-
-public:
-  G1AdjustObjectClosure(G1AdjustAndRebuildClosure* cl) : _closure(cl) { }
-
-  inline int adjust_object(oop obj);
-};
-
 class G1VerifyOopClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,8 @@
 #include "gc/g1/g1FullGCOopClosures.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 template <typename T>
 inline void G1MarkAndPushClosure::do_oop_nv(T* p) {
@@ -49,18 +51,17 @@
   _marker->follow_cld(cld);
 }
 
-template <class T> inline oop G1AdjustClosure::adjust_pointer(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(heap_oop)) {
-    // NULL reference, return NULL.
-    return NULL;
+template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (CompressedOops::is_null(heap_oop)) {
+    return;
   }
 
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   assert(Universe::heap()->is_in(obj), "should be in heap");
   if (G1ArchiveAllocator::is_archive_object(obj)) {
-    // Never forwarding archive objects, return current reference.
-    return obj;
+    // We never forward archive objects.
+    return;
   }
 
   oop forwardee = obj->forwardee();
@@ -71,50 +72,16 @@
            (UseBiasedLocking && obj->has_bias_pattern()), // Will be restored by BiasedLocking
            "Must have correct prototype or be preserved, obj: " PTR_FORMAT ", mark: " PTR_FORMAT ", prototype: " PTR_FORMAT,
            p2i(obj), p2i(obj->mark()), p2i(markOopDesc::prototype_for_object(obj)));
-    return obj;
-  }
-
-  // Forwarded, update and return new reference.
-  assert(Universe::heap()->is_in_reserved(forwardee), "should be in object space");
-  oopDesc::encode_store_heap_oop_not_null(p, forwardee);
-  return forwardee;
-}
-
-template <class T>
-inline void G1AdjustAndRebuildClosure::add_reference(T* from_field, oop reference, uint worker_id) {
-  if (HeapRegion::is_in_same_region(from_field, reference)) {
-    return;
-  }
-  _g1h->heap_region_containing(reference)->rem_set()->add_reference(from_field, worker_id);
-}
-
-inline size_t G1AdjustAndRebuildClosure::calculate_compaction_delta(oop current, oop forwardee) {
-  return pointer_delta((HeapWord*)forwardee, (HeapWord*)current);
-}
-
-template <class T>
-inline T* G1AdjustAndRebuildClosure::add_compaction_delta(T* p) {
-  return (T*)((HeapWord*)p + _compaction_delta);
-}
-
-template<typename T>
-void G1AdjustAndRebuildClosure::do_oop_nv(T* p) {
-  oop new_reference = G1AdjustClosure::adjust_pointer(p);
-  if (new_reference == NULL) {
     return;
   }
 
-  // Update p using the calculated compaction delta to
-  // get the new field address.
-  T* new_field = add_compaction_delta(p);
-  // Update the remembered set.
-  add_reference(new_field, new_reference, _worker_id);
+  // Forwarded, just update.
+  assert(Universe::heap()->is_in_reserved(forwardee), "should be in object space");
+  RawAccess<OOP_NOT_NULL>::oop_store(p, forwardee);
 }
 
-inline int G1AdjustObjectClosure::adjust_object(oop obj) {
-  _closure->update_compaction_delta(obj);
-  return obj->oop_iterate_size(_closure);
-}
+inline void G1AdjustClosure::do_oop(oop* p)       { do_oop_nv(p); }
+inline void G1AdjustClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
 
 inline bool G1IsAliveClosure::do_object_b(oop p) {
   return _bitmap->is_marked(p) || G1ArchiveAllocator::is_closed_archive_object(p);
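
With remembered-set rebuilding moved out of the full-GC adjust phase, adjust_pointer() above reduces to pure pointer fixup: load, null-check, and store the forwardee if the object moved. The core idea in a standalone sketch over a toy object model (not HotSpot's):

#include <cassert>
#include <cstddef>

struct Obj {
  Obj* forwardee = nullptr; // set by the earlier compaction-prepare phase if the object moves
};

static void adjust_pointer(Obj** p) {
  Obj* obj = *p;
  if (obj == nullptr) {
    return;              // NULL reference, nothing to adjust
  }
  if (obj->forwardee == nullptr) {
    return;              // object not moving, keep the reference
  }
  *p = obj->forwardee;   // forwarded: just update, no remset rebuild here
}

int main() {
  Obj moved, target;
  moved.forwardee = &target;
  Obj* field = &moved;
  adjust_pointer(&field);
  assert(field == &target);
  return 0;
}
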
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -103,16 +103,14 @@
   hr->set_containing_set(NULL);
   _humongous_regions_removed++;
 
-  _g1h->free_humongous_region(hr, &dummy_free_list, false /* skip_remset */);
+  _g1h->free_humongous_region(hr, &dummy_free_list);
   prepare_for_compaction(hr);
   dummy_free_list.remove_all();
 }
 
 void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
-  hr->reset_gc_time_stamp();
   hr->rem_set()->clear();
-
-  _g1h->card_table()->clear(MemRegion(hr->bottom(), hr->end()));
+  hr->clear_cardtable();
 
   if (_g1h->g1_hot_card_cache()->use_cache()) {
     _g1h->g1_hot_card_cache()->reset_card_counts(hr);
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -23,10 +23,10 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/concurrentMarkThread.hpp"
 #include "gc/g1/g1Allocator.inline.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RemSet.hpp"
@@ -38,9 +38,13 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 
+int G1HeapVerifier::_enabled_verification_types = G1HeapVerifier::G1VerifyAll;
+
 class VerifyRootsClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
@@ -58,9 +62,9 @@
   bool failures() { return _failures; }
 
   template <class T> void do_oop_nv(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       if (_g1h->is_obj_dead_cond(obj, _vo)) {
         Log(gc, verify) log;
         log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
@@ -101,9 +105,9 @@
     // in the code root list of the heap region containing the
     // object referenced by p.
 
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
 
       // Now fetch the region containing the object
       HeapRegion* hr = _g1h->heap_region_containing(obj);
@@ -186,7 +190,7 @@
   void do_oop(      oop *p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
               "Dead object referenced by a not dead object");
   }
@@ -240,7 +244,7 @@
   void do_oop(      oop *p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
 
     if (_hr->is_open_archive()) {
       guarantee(obj == NULL || G1ArchiveAllocator::is_archive_object(obj),
@@ -308,6 +312,9 @@
   }
 
   bool do_heap_region(HeapRegion* r) {
+    guarantee(!r->is_young() || r->rem_set()->is_complete(), "Remembered set for Young region %u must be complete, is %s", r->hrm_index(), r->rem_set()->get_state_str());
+    // Humongous and old regions might be in any state, so we cannot check them here.
+    guarantee(!r->is_free() || !r->rem_set()->is_tracked(), "Remembered set for free region %u must be untracked, is %s", r->hrm_index(), r->rem_set()->get_state_str());
     // For archive regions, verify there are no heap pointers to
     // non-pinned regions. For all others, verify liveness info.
     if (r->is_closed_archive()) {
@@ -377,25 +384,6 @@
   }
 };
 
-void G1HeapVerifier::parse_verification_type(const char* type) {
-  if (strcmp(type, "young-only") == 0) {
-    enable_verification_type(G1VerifyYoungOnly);
-  } else if (strcmp(type, "initial-mark") == 0) {
-    enable_verification_type(G1VerifyInitialMark);
-  } else if (strcmp(type, "mixed") == 0) {
-    enable_verification_type(G1VerifyMixed);
-  } else if (strcmp(type, "remark") == 0) {
-    enable_verification_type(G1VerifyRemark);
-  } else if (strcmp(type, "cleanup") == 0) {
-    enable_verification_type(G1VerifyCleanup);
-  } else if (strcmp(type, "full") == 0) {
-    enable_verification_type(G1VerifyFull);
-  } else {
-    log_warning(gc, verify)("VerifyGCType: '%s' is unknown. Available types are: "
-                            "young-only, initial-mark, mixed, remark, cleanup and full", type);
-  }
-}
-
 void G1HeapVerifier::enable_verification_type(G1VerifyType type) {
   // First enable will clear _enabled_verification_types.
   if (_enabled_verification_types == G1VerifyAll) {
@@ -436,7 +424,7 @@
 
   bool failures = rootsCl.failures() || codeRootsCl.failures();
 
-  if (!_g1h->g1_policy()->collector_state()->full_collection()) {
+  if (!_g1h->g1_policy()->collector_state()->in_full_gc()) {
     // If we're verifying during a full GC then the region sets
     // will have been torn down at the start of the GC. Therefore
     // verifying the region sets will fail. So we only verify
@@ -468,7 +456,7 @@
   }
 
   if (failures) {
-    log_error(gc, verify)("Heap after failed verification:");
+    log_error(gc, verify)("Heap after failed verification (kind %d):", vo);
     // It helps to have the per-region information in the output to
     // help us track down what went wrong. This is why we call
     // print_extended_on() instead of print_on().
@@ -532,32 +520,6 @@
 
   // First, check the explicit lists.
   _g1h->_hrm.verify();
-  {
-    // Given that a concurrent operation might be adding regions to
-    // the secondary free list we have to take the lock before
-    // verifying it.
-    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-    _g1h->_secondary_free_list.verify_list();
-  }
-
-  // If a concurrent region freeing operation is in progress it will
-  // be difficult to correctly attributed any free regions we come
-  // across to the correct free list given that they might belong to
-  // one of several (free_list, secondary_free_list, any local lists,
-  // etc.). So, if that's the case we will skip the rest of the
-  // verification operation. Alternatively, waiting for the concurrent
-  // operation to complete will have a non-trivial effect on the GC's
-  // operation (no concurrent operation will last longer than the
-  // interval between two calls to verification) and it might hide
-  // any issues that we would like to catch during testing.
-  if (_g1h->free_regions_coming()) {
-    return;
-  }
-
-  // Make sure we append the secondary_free_list on the free_list so
-  // that all free regions we will come across can be safely
-  // attributed to the free_list.
-  _g1h->append_secondary_free_list_if_not_empty_with_lock();
 
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
@@ -689,10 +651,8 @@
   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
 
   bool res_n = true;
-  // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
-  // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
-  // if we happen to be in that state.
-  if (_g1h->collector_state()->mark_in_progress() || !_g1h->_cmThread->in_progress()) {
+  // We cannot verify the next bitmap while we are about to clear it.
+  if (!_g1h->collector_state()->clearing_next_bitmap()) {
     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
   }
   if (!res_p || !res_n) {
@@ -704,7 +664,9 @@
 }
 
 void G1HeapVerifier::check_bitmaps(const char* caller, HeapRegion* hr) {
-  if (!G1VerifyBitmaps) return;
+  if (!G1VerifyBitmaps) {
+    return;
+  }
 
   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
 }
@@ -731,7 +693,9 @@
 };
 
 void G1HeapVerifier::check_bitmaps(const char* caller) {
-  if (!G1VerifyBitmaps) return;
+  if (!G1VerifyBitmaps) {
+    return;
+  }
 
   G1VerifyBitmapClosure cl(caller, this);
   _g1h->heap_region_iterate(&cl);
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,9 @@
 
 class G1HeapVerifier : public CHeapObj<mtGC> {
 private:
+  static int _enabled_verification_types;
+
   G1CollectedHeap* _g1h;
-  int _enabled_verification_types;
 
   // verify_region_sets() performs verification over the region
   // lists. It will be compiled in the product code to be used when
@@ -52,11 +53,10 @@
     G1VerifyAll         = -1
   };
 
-  G1HeapVerifier(G1CollectedHeap* heap) : _g1h(heap), _enabled_verification_types(G1VerifyAll) { }
+  G1HeapVerifier(G1CollectedHeap* heap) : _g1h(heap) {}
 
-  void parse_verification_type(const char* type);
-  void enable_verification_type(G1VerifyType type);
-  bool should_verify(G1VerifyType type);
+  static void enable_verification_type(G1VerifyType type);
+  static bool should_verify(G1VerifyType type);
 
   // Perform verification.
 
--- a/src/hotspot/share/gc/g1/g1InCSetState.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1InCSetState.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
 
 #include "gc/g1/g1BiasedArray.hpp"
 #include "gc/g1/heapRegion.hpp"
-#include "memory/allocation.hpp"
 
 // Per-region state during garbage collection.
 struct InCSetState {
--- a/src/hotspot/share/gc/g1/g1OopClosures.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1OopClosures.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -151,11 +151,11 @@
 };
 
 class G1CLDScanClosure : public CLDClosure {
- G1ParCopyHelper* _closure;
- bool             _process_only_dirty;
- bool             _must_claim;
- int              _count;
- public:
+  G1ParCopyHelper* _closure;
+  bool             _process_only_dirty;
+  bool             _must_claim;
+  int              _count;
+public:
   G1CLDScanClosure(G1ParCopyHelper* closure,
                    bool process_only_dirty, bool must_claim)
       : _process_only_dirty(process_only_dirty), _must_claim(must_claim), _closure(closure), _count(0) {}
@@ -164,13 +164,10 @@
 
 // Closure for iterating over object fields during concurrent marking
 class G1CMOopClosure : public MetadataAwareOopClosure {
-protected:
-  G1ConcurrentMark*  _cm;
-private:
   G1CollectedHeap*   _g1h;
   G1CMTask*          _task;
 public:
-  G1CMOopClosure(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1CMTask* task);
+  G1CMOopClosure(G1CollectedHeap* g1h, G1CMTask* task);
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(      oop* p) { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
@@ -181,9 +178,10 @@
 private:
   G1CollectedHeap* _g1h;
   G1ConcurrentMark* _cm;
+  uint _worker_id;
 public:
-  G1RootRegionScanClosure(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
-    _g1h(g1h), _cm(cm) { }
+  G1RootRegionScanClosure(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint worker_id) :
+    _g1h(g1h), _cm(cm), _worker_id(worker_id) { }
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(      oop* p) { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
@@ -207,4 +205,18 @@
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
 };
 
+class G1RebuildRemSetClosure : public ExtendedOopClosure {
+  G1CollectedHeap* _g1;
+  uint _worker_id;
+public:
+  G1RebuildRemSetClosure(G1CollectedHeap* g1, uint worker_id) : _g1(g1), _worker_id(worker_id) {
+  }
+
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)       { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+  // This closure needs special handling for InstanceRefKlass.
+  virtual ReferenceIterationMode reference_iteration_mode() { return DO_DISCOVERED_AND_DISCOVERY; }
+};
+
 #endif // SHARE_VM_GC_G1_G1OOPCLOSURES_HPP
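
G1RebuildRemSetClosure above only records references that cross regions. With power-of-two region sizes, that test reduces to comparing the address bits above the region-size bits, as in this standalone sketch assuming 1 MB regions (G1 derives the real size from the heap size):

#include <cassert>
#include <cstdint>

static const int LogRegionBytes = 20; // assumed 1 MB regions

static bool is_in_same_region(const void* p, const void* obj) {
  uintptr_t a = reinterpret_cast<uintptr_t>(p);
  uintptr_t b = reinterpret_cast<uintptr_t>(obj);
  return ((a ^ b) >> LogRegionBytes) == 0; // equal above the region bits
}

int main() {
  char* base = reinterpret_cast<char*>(uintptr_t(1) << 30); // region-aligned
  assert(is_in_same_region(base + 16, base + 1000));
  assert(!is_in_same_region(base + 16, base + (1 << LogRegionBytes)));
  return 0;
}
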
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,8 @@
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.inline.hpp"
 #include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oopsHierarchy.hpp"
 #include "runtime/prefetch.inline.hpp"
 
 template <class T>
@@ -49,9 +51,9 @@
   // slightly paranoid test; I'm trying to catch potential
   // problems before we go into push_on_queue to know where the
   // problem is coming from
-  assert((obj == oopDesc::load_decode_heap_oop(p)) ||
+  assert((obj == RawAccess<>::oop_load(p)) ||
          (obj->is_forwarded() &&
-         obj->forwardee() == oopDesc::load_decode_heap_oop(p)),
+         obj->forwardee() == RawAccess<>::oop_load(p)),
          "p should still be pointing to obj or to its forwardee");
 
   _par_scan_state->push_on_queue(p);
@@ -66,12 +68,12 @@
 
 template <class T>
 inline void G1ScanEvacuatedObjClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
 
-  if (oopDesc::is_null(heap_oop)) {
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   const InCSetState state = _g1->in_cset_state(obj);
   if (state.is_in_cset()) {
     prefetch_and_push(p, obj);
@@ -86,18 +88,17 @@
 
 template <class T>
 inline void G1CMOopClosure::do_oop_nv(T* p) {
-  oop obj = RawAccess<MO_VOLATILE>::oop_load(p);
-  _task->deal_with_reference(obj);
+  _task->deal_with_reference(p);
 }
 
 template <class T>
 inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
   T heap_oop = RawAccess<MO_VOLATILE>::oop_load(p);
-  if (oopDesc::is_null(heap_oop)) {
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-  _cm->mark_in_next_bitmap(obj);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
+  _cm->mark_in_next_bitmap(_worker_id, obj);
 }
 
 template <class T>
@@ -124,10 +125,10 @@
 template <class T>
 inline void G1ConcurrentRefineOopClosure::do_oop_nv(T* p) {
   T o = RawAccess<MO_VOLATILE>::oop_load(p);
-  if (oopDesc::is_null(o)) {
+  if (CompressedOops::is_null(o)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(o);
+  oop obj = CompressedOops::decode_not_null(o);
 
   check_obj_during_refinement(p, obj);
 
@@ -142,19 +143,21 @@
     return;
   }
 
-  HeapRegion* to = _g1->heap_region_containing(obj);
+  HeapRegionRemSet* to_rem_set = _g1->heap_region_containing(obj)->rem_set();
 
-  assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
-  to->rem_set()->add_reference(p, _worker_i);
+  assert(to_rem_set != NULL, "Need per-region 'into' remsets.");
+  if (to_rem_set->is_tracked()) {
+    to_rem_set->add_reference(p, _worker_i);
+  }
 }
 
 template <class T>
 inline void G1ScanObjsDuringUpdateRSClosure::do_oop_nv(T* p) {
-  T o = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(o)) {
+  T o = RawAccess<>::oop_load(p);
+  if (CompressedOops::is_null(o)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(o);
+  oop obj = CompressedOops::decode_not_null(o);
 
   check_obj_during_refinement(p, obj);
 
@@ -176,11 +179,11 @@
 
 template <class T>
 inline void G1ScanObjsDuringScanRSClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(heap_oop)) {
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
 
   const InCSetState state = _g1->in_cset_state(obj);
   if (state.is_in_cset()) {
@@ -202,7 +205,8 @@
 void G1ParCopyHelper::mark_object(oop obj) {
   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
-  _cm->mark_in_next_bitmap(obj);
+  // We know that the object is not moving so it's safe to read its size.
+  _cm->mark_in_next_bitmap(_worker_id, obj);
 }
 
 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
@@ -213,19 +217,23 @@
   assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
   assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
 
-  _cm->mark_in_next_bitmap(to_obj);
+  // The object might be in the process of being copied by another
+  // worker so we cannot trust that its to-space image is
+  // well-formed. So we have to read its size from its from-space
+  // image which we know should not be changing.
+  _cm->mark_in_next_bitmap(_worker_id, to_obj, from_obj->size());
 }
 
 template <G1Barrier barrier, G1Mark do_mark_object>
 template <class T>
 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
 
-  if (oopDesc::is_null(heap_oop)) {
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
 
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
 
   assert(_worker_id == _par_scan_state->worker_id(), "sanity");
 
@@ -239,7 +247,7 @@
       forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
-    oopDesc::encode_store_heap_oop(p, forwardee);
+    RawAccess<>::oop_store(p, forwardee);
     if (do_mark_object != G1MarkNone && forwardee != obj) {
       // If the object is self-forwarded we don't need to explicitly
       // mark it, the evacuation failure protocol will do so.
@@ -261,4 +269,20 @@
     }
   }
 }
+
+template <class T> void G1RebuildRemSetClosure::do_oop_nv(T* p) {
+  oop const obj = RawAccess<MO_VOLATILE>::oop_load(p);
+  if (obj == NULL) {
+    return;
+  }
+
+  if (HeapRegion::is_in_same_region(p, obj)) {
+    return;
+  }
+
+  HeapRegion* to = _g1->heap_region_containing(obj);
+  HeapRegionRemSet* rem_set = to->rem_set();
+  rem_set->add_reference(p, _worker_id);
+}
+
 #endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -33,6 +33,7 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "memory/allocation.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 
@@ -104,7 +105,7 @@
   assert(ref != NULL, "invariant");
   assert(UseCompressedOops, "sanity");
   assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
-  oop p = oopDesc::load_decode_heap_oop(ref);
+  oop p = RawAccess<>::oop_load(ref);
   assert(_g1h->is_in_g1_reserved(p),
          "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   return true;
@@ -118,7 +119,7 @@
     assert(_g1h->is_in_cset(p),
            "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   } else {
-    oop p = oopDesc::load_decode_heap_oop(ref);
+    oop p = RawAccess<>::oop_load(ref);
     assert(_g1h->is_in_g1_reserved(p),
            "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   }
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -31,6 +31,7 @@
 #include "gc/g1/g1OopClosures.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RemSet.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/ageTable.hpp"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
@@ -102,8 +103,9 @@
   template <class T> void update_rs(HeapRegion* from, T* p, oop o) {
     assert(!HeapRegion::is_in_same_region(p, o), "Caller should have filtered out cross-region references already.");
     // If the field originates from the to-space, we don't need to include it
-    // in the remembered set updates.
-    if (!from->is_young()) {
+    // in the remembered set updates. Also, if we are not tracking the remembered
+    // set in the destination region, do not bother either.
+    if (!from->is_young() && _g1h->heap_region_containing((HeapWord*)o)->rem_set()->is_tracked()) {
       size_t card_index = ct()->index_for(p);
       // If the card hasn't been added to the buffer, do it.
       if (ct()->mark_card_deferred(card_index)) {
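
update_rs() above now also skips the remembered-set update when the destination region's set is untracked. The card index it defers is plain shift arithmetic over the heap offset; a standalone sketch with HotSpot's default 512-byte cards:

#include <cstddef>
#include <cstdint>
#include <cstdio>

static const int LogCardBytes = 9; // 512-byte cards, HotSpot's default size

// Card index for address p within a heap that starts at heap_base.
static size_t index_for(uintptr_t p, uintptr_t heap_base) {
  return (p - heap_base) >> LogCardBytes;
}

int main() {
  uintptr_t base = 0x100000;
  std::printf("%zu\n", index_for(base + 5 * 512 + 17, base)); // prints 5
  return 0;
}
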
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,12 +27,12 @@
 
 #include "gc/g1/g1ParScanThreadState.hpp"
 #include "gc/g1/g1RemSet.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
-  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
-         "Reference should not be NULL here as such are never pushed to the task queue.");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  // Reference should not be NULL here as such are never pushed to the task queue.
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
 
   // Although we never intentionally push references outside of the collection
   // set, due to (benign) races in the claim mechanism during RSet scanning more
@@ -46,7 +46,7 @@
     } else {
       obj = copy_to_survivor_space(in_cset_state, obj, m);
     }
-    oopDesc::encode_store_heap_oop(p, obj);
+    RawAccess<>::oop_store(p, obj);
   } else if (in_cset_state.is_humongous()) {
     _g1h->set_humongous_is_live(obj);
   } else {
@@ -146,4 +146,3 @@
 }
 
 #endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
-
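
The has_partial_array_mask() asserts in the verify_ref code above rely on G1's tagged task-queue entries: an entry is either a plain oop location or a partially-scanned object array, distinguished by a tag bit smuggled into the pointer's low bits. The trick, sketched with an assumed tag value:

#include <cassert>
#include <cstdint>

static const uintptr_t PartialArrayMask = 0x2; // illustrative tag bit

static void* set_partial_array_mask(void* obj) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) | PartialArrayMask);
}

static bool has_partial_array_mask(void* ref) {
  return (reinterpret_cast<uintptr_t>(ref) & PartialArrayMask) != 0;
}

int main() {
  int slot; // any 4-byte-aligned location has free low bits
  void* tagged = set_partial_array_mask(&slot);
  assert(has_partial_array_mask(tagged));
  assert(!has_partial_array_mask(&slot));
  return 0;
}
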
--- a/src/hotspot/share/gc/g1/g1Policy.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -23,11 +23,11 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1Analytics.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
@@ -49,6 +49,7 @@
 G1Policy::G1Policy(STWGCTimer* gc_timer) :
   _predictor(G1ConfidencePercent / 100.0),
   _analytics(new G1Analytics(&_predictor)),
+  _remset_tracker(),
   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
   _ihop_control(create_ihop_control(&_predictor)),
   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
@@ -66,7 +67,8 @@
   _tenuring_threshold(MaxTenuringThreshold),
   _max_survivor_regions(0),
   _survivors_age_table(true),
-  _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) { }
+  _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) {
+}
 
 G1Policy::~G1Policy() {
   delete _ihop_control;
@@ -227,7 +229,7 @@
 
   uint young_list_target_length = 0;
   if (adaptive_young_list_length()) {
-    if (collector_state()->gcs_are_young()) {
+    if (collector_state()->in_young_only_phase()) {
       young_list_target_length =
                         calculate_young_list_target_length(rs_lengths,
                                                            base_min_length,
@@ -279,7 +281,7 @@
                                                     uint desired_min_length,
                                                     uint desired_max_length) const {
   assert(adaptive_young_list_length(), "pre-condition");
-  assert(collector_state()->gcs_are_young(), "only call this for young GCs");
+  assert(collector_state()->in_young_only_phase(), "only call this for young GCs");
 
   // In case some edge-condition makes the desired max length too small...
   if (desired_max_length <= desired_min_length) {
@@ -300,7 +302,7 @@
   const double survivor_regions_evac_time = predict_survivor_regions_evac_time();
   const size_t pending_cards = _analytics->predict_pending_cards();
   const size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
-  const size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
+  const size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, true /* for_young_gc */);
   const double base_time_ms =
     predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
     survivor_regions_evac_time;
@@ -311,7 +313,7 @@
   // Here, we will make sure that the shortest young length that
   // makes sense fits within the target pause time.
 
-  G1YoungLengthPredictor p(collector_state()->during_concurrent_mark(),
+  G1YoungLengthPredictor p(collector_state()->mark_or_rebuild_in_progress(),
                            base_time_ms,
                            base_free_regions,
                            target_pause_time_ms,
@@ -382,7 +384,7 @@
   for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
        it != survivor_regions->end();
        ++it) {
-    survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->gcs_are_young());
+    survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->in_young_only_phase());
   }
   return survivor_regions_evac_time;
 }
@@ -404,7 +406,7 @@
 }
 
 void G1Policy::update_rs_lengths_prediction(size_t prediction) {
-  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
+  if (collector_state()->in_young_only_phase() && adaptive_young_list_length()) {
     _rs_lengths_prediction = prediction;
   }
 }
@@ -412,7 +414,9 @@
 void G1Policy::record_full_collection_start() {
   _full_collection_start_sec = os::elapsedTime();
   // Release the future to-space so that it is available for compaction into.
-  collector_state()->set_full_collection(true);
+  collector_state()->set_in_young_only_phase(false);
+  collector_state()->set_in_full_gc(true);
+  cset_chooser()->clear();
 }
 
 void G1Policy::record_full_collection_end() {
@@ -424,16 +428,16 @@
 
   _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
 
-  collector_state()->set_full_collection(false);
+  collector_state()->set_in_full_gc(false);
 
   // "Nuke" the heuristics that control the young/mixed GC
   // transitions and make sure we start with young GCs after the Full GC.
-  collector_state()->set_gcs_are_young(true);
-  collector_state()->set_last_young_gc(false);
+  collector_state()->set_in_young_only_phase(true);
+  collector_state()->set_in_young_gc_before_mixed(false);
   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
-  collector_state()->set_during_initial_mark_pause(false);
-  collector_state()->set_in_marking_window(false);
-  collector_state()->set_in_marking_window_im(false);
+  collector_state()->set_in_initial_mark_gc(false);
+  collector_state()->set_mark_or_rebuild_in_progress(false);
+  collector_state()->set_clearing_next_bitmap(false);
 
   _short_lived_surv_rate_group->start_adding_regions();
   // also call this on any additional surv rate groups
@@ -443,7 +447,6 @@
   _survivor_surv_rate_group->reset();
   update_young_list_max_and_target_length();
   update_rs_lengths_prediction();
-  cset_chooser()->clear();
 
   _bytes_allocated_in_old_since_last_gc = 0;
 
@@ -466,8 +469,6 @@
   _collection_set->reset_bytes_used_before();
   _bytes_copied_during_gc = 0;
 
-  collector_state()->set_last_gc_was_young(false);
-
   // do that for any other surv rate groups
   _short_lived_surv_rate_group->stop_adding_regions();
   _survivors_age_table.clear();
@@ -476,14 +477,12 @@
 }
 
 void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
-  collector_state()->set_during_marking(true);
   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
-  collector_state()->set_during_initial_mark_pause(false);
+  collector_state()->set_in_initial_mark_gc(false);
 }
 
 void G1Policy::record_concurrent_mark_remark_start() {
   _mark_remark_start_sec = os::elapsedTime();
-  collector_state()->set_during_marking(false);
 }
 
 void G1Policy::record_concurrent_mark_remark_end() {
@@ -499,17 +498,6 @@
   _mark_cleanup_start_sec = os::elapsedTime();
 }
 
-void G1Policy::record_concurrent_mark_cleanup_completed() {
-  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
-                                                              "skip last young-only gc");
-  collector_state()->set_last_young_gc(should_continue_with_reclaim);
-  // We skip the marking phase.
-  if (!should_continue_with_reclaim) {
-    abort_time_to_mixed_tracking();
-  }
-  collector_state()->set_in_marking_window(false);
-}
-
 double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
   return phase_times()->average_time_ms(phase);
 }
@@ -537,7 +525,7 @@
 }
 
 bool G1Policy::about_to_start_mixed_phase() const {
-  return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->last_young_gc();
+  return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
 }
 
 bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
@@ -553,7 +541,7 @@
 
   bool result = false;
   if (marking_request_bytes > marking_initiating_used_threshold) {
-    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
+    result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
     log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                               result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
                               cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
@@ -570,15 +558,17 @@
 
   size_t cur_used_bytes = _g1->used();
   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
-  bool last_pause_included_initial_mark = false;
+  bool this_pause_included_initial_mark = false;
+  bool this_pause_was_young_only = collector_state()->in_young_only_phase();
+
   bool update_stats = !_g1->evacuation_failed();
 
   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 
   _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 
-  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
-  if (last_pause_included_initial_mark) {
+  this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
+  if (this_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
   } else {
     maybe_start_marking();
@@ -611,36 +601,21 @@
     _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
   }
 
-  bool new_in_marking_window = collector_state()->in_marking_window();
-  bool new_in_marking_window_im = false;
-  if (last_pause_included_initial_mark) {
-    new_in_marking_window = true;
-    new_in_marking_window_im = true;
-  }
-
-  if (collector_state()->last_young_gc()) {
-    // This is supposed to to be the "last young GC" before we start
-    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
-    assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
-
-    if (next_gc_should_be_mixed("start mixed GCs",
-                                "do not start mixed GCs")) {
-      collector_state()->set_gcs_are_young(false);
-    } else {
-      // We aborted the mixed GC phase early.
-      abort_time_to_mixed_tracking();
-    }
-
-    collector_state()->set_last_young_gc(false);
-  }
-
-  if (!collector_state()->last_gc_was_young()) {
-    // This is a mixed GC. Here we decide whether to continue doing
+  if (collector_state()->in_young_gc_before_mixed()) {
+    assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
+    // This has been the young GC before we start doing mixed GCs. We already
+    // decided to start mixed GCs much earlier, so there is nothing to do except
+    // advancing the state.
+    collector_state()->set_in_young_only_phase(false);
+    collector_state()->set_in_young_gc_before_mixed(false);
+  } else if (!this_pause_was_young_only) {
+    // This is a mixed GC. Here we decide whether to continue doing more
     // mixed GCs or not.
     if (!next_gc_should_be_mixed("continue mixed GCs",
                                  "do not continue mixed GCs")) {
-      collector_state()->set_gcs_are_young(true);
+      collector_state()->set_in_young_only_phase(true);
 
+      clear_collection_set_candidates();
       maybe_start_marking();
     }
   }
@@ -661,13 +636,13 @@
     double cost_per_entry_ms = 0.0;
     if (cards_scanned > 10) {
       cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
-      _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
+      _analytics->report_cost_per_entry_ms(cost_per_entry_ms, this_pause_was_young_only);
     }
 
     if (_max_rs_lengths > 0) {
       double cards_per_entry_ratio =
         (double) cards_scanned / (double) _max_rs_lengths;
-      _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
+      _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, this_pause_was_young_only);
     }
 
     // This is defensive. For a while _max_rs_lengths could get
@@ -696,7 +671,7 @@
 
     if (copied_bytes > 0) {
       cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
-      _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
+      _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
     }
 
     if (_collection_set->young_region_length() > 0) {
@@ -715,8 +690,12 @@
     _analytics->report_rs_lengths((double) _max_rs_lengths);
   }
 
-  collector_state()->set_in_marking_window(new_in_marking_window);
-  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
+  assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
+         "If the last pause has been an initial mark, we should not have been in the marking window");
+  if (this_pause_included_initial_mark) {
+    collector_state()->set_mark_or_rebuild_in_progress(true);
+  }
+
   _free_regions_at_end_of_collection = _g1->num_free_regions();
   // IHOP control wants to know the expected young gen length if it were not
   // restrained by the heap reserve. Using the actual length would make the
@@ -727,7 +706,8 @@
 
   update_ihop_prediction(app_time_ms / 1000.0,
                          _bytes_allocated_in_old_since_last_gc,
-                         last_unrestrained_young_length * HeapRegion::GrainBytes);
+                         last_unrestrained_young_length * HeapRegion::GrainBytes,
+                         this_pause_was_young_only);
   _bytes_allocated_in_old_since_last_gc = 0;
 
   _ihop_control->send_trace_event(_g1->gc_tracer_stw());
@@ -745,8 +725,8 @@
     update_rs_time_goal_ms -= scan_hcc_time_ms;
   }
   _g1->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
-                                      phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
-                                      update_rs_time_goal_ms);
+                                   phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
+                                   update_rs_time_goal_ms);
 
   cset_chooser()->verify();
 }
@@ -764,7 +744,8 @@
 
 void G1Policy::update_ihop_prediction(double mutator_time_s,
                                       size_t mutator_alloc_bytes,
-                                      size_t young_gen_size) {
+                                      size_t young_gen_size,
+                                      bool this_gc_was_young_only) {
   // Always try to update IHOP prediction. Even evacuation failures give information
   // about e.g. whether to start IHOP earlier next time.
 
@@ -775,7 +756,7 @@
   bool report = false;
 
   double marking_to_mixed_time = -1.0;
-  if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
+  if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) {
     marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
     assert(marking_to_mixed_time > 0.0,
            "Initial mark to mixed time must be larger than zero but is %.3f",
@@ -790,7 +771,7 @@
   // all of them. In many applications there are only a few if any young gcs during
   // marking, which makes any prediction useless. This increases the accuracy of the
   // prediction.
-  if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
+  if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
     _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
     report = true;
   }
@@ -826,13 +807,13 @@
                                               size_t scanned_cards) const {
   return
     _analytics->predict_rs_update_time_ms(pending_cards) +
-    _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
+    _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->in_young_only_phase()) +
     _analytics->predict_constant_other_time_ms();
 }
 
 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
   size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
-  size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
+  size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->in_young_only_phase());
   return predict_base_elapsed_time_ms(pending_cards, card_num);
 }
 
@@ -858,8 +839,8 @@
   size_t bytes_to_copy = predict_bytes_to_copy(hr);
 
   double region_elapsed_time_ms =
-    _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
-    _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());
+    _analytics->predict_rs_scan_time_ms(card_num, collector_state()->in_young_only_phase()) +
+    _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
 
   // The prediction of the "other" time for this region is based
   // upon the region type and NOT the GC type.
@@ -942,7 +923,7 @@
 }
 
 void G1Policy::initiate_conc_mark() {
-  collector_state()->set_during_initial_mark_pause(true);
+  collector_state()->set_in_initial_mark_gc(true);
   collector_state()->set_initiate_conc_mark_if_possible(false);
 }
 
@@ -950,27 +931,32 @@
   // We are about to decide on whether this pause will be an
   // initial-mark pause.
 
-  // First, collector_state()->during_initial_mark_pause() should not be already set. We
+  // First, collector_state()->in_initial_mark_gc() should not be already set. We
   // will set it here if we have to. However, it should be cleared by
   // the end of the pause (it's only set for the duration of an
   // initial-mark pause).
-  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
+  assert(!collector_state()->in_initial_mark_gc(), "pre-condition");
 
   if (collector_state()->initiate_conc_mark_if_possible()) {
     // We had noticed on a previous pause that the heap occupancy has
     // gone over the initiating threshold and we should start a
     // concurrent marking cycle. So we might initiate one.
 
-    if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
+    if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
       // Initiate a new initial mark if there is no marking or reclamation going on.
       initiate_conc_mark();
       log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
     } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
       // Initiate a user requested initial mark. An initial mark must be young only
       // GC, so the collector state must be updated to reflect this.
-      collector_state()->set_gcs_are_young(true);
-      collector_state()->set_last_young_gc(false);
+      collector_state()->set_in_young_only_phase(true);
+      collector_state()->set_in_young_gc_before_mixed(false);
 
+      // We might have ended up coming here about to start a mixed phase with a collection set
+      // active. The following remark might change the "evacuation efficiency" of
+      // the regions in this set, leading to failing asserts later.
+      // Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
+      clear_collection_set_candidates();
       abort_time_to_mixed_tracking();
       initiate_conc_mark();
       log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
@@ -995,6 +981,14 @@
 void G1Policy::record_concurrent_mark_cleanup_end() {
   cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());
 
+  bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
+  if (!mixed_gc_pending) {
+    clear_collection_set_candidates();
+    abort_time_to_mixed_tracking();
+  }
+  collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
+  collector_state()->set_mark_or_rebuild_in_progress(false);
+
   double end_sec = os::elapsedTime();
   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
   _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
@@ -1007,6 +1001,21 @@
   return percent_of(reclaimable_bytes, _g1->capacity());
 }
 
+class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
+  virtual bool do_heap_region(HeapRegion* r) {
+    r->rem_set()->clear_locked(true /* only_cardset */);
+    return false;
+  }
+};
+
+void G1Policy::clear_collection_set_candidates() {
+  // Clear remembered sets of remaining candidate regions and the actual candidate
+  // list.
+  G1ClearCollectionSetCandidateRemSets cl;
+  cset_chooser()->iterate(&cl);
+  cset_chooser()->clear();
+}
+
 void G1Policy::maybe_start_marking() {
   if (need_to_start_conc_mark("end of GC")) {
     // Note: this might have already been set, if during the last
@@ -1017,23 +1026,20 @@
 }
 
 G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
-  assert(!collector_state()->full_collection(), "must be");
-  if (collector_state()->during_initial_mark_pause()) {
-    assert(collector_state()->last_gc_was_young(), "must be");
-    assert(!collector_state()->last_young_gc(), "must be");
+  assert(!collector_state()->in_full_gc(), "must be");
+  if (collector_state()->in_initial_mark_gc()) {
+    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
     return InitialMarkGC;
-  } else if (collector_state()->last_young_gc()) {
-    assert(!collector_state()->during_initial_mark_pause(), "must be");
-    assert(collector_state()->last_gc_was_young(), "must be");
+  } else if (collector_state()->in_young_gc_before_mixed()) {
+    assert(!collector_state()->in_initial_mark_gc(), "must be");
     return LastYoungGC;
-  } else if (!collector_state()->last_gc_was_young()) {
-    assert(!collector_state()->during_initial_mark_pause(), "must be");
-    assert(!collector_state()->last_young_gc(), "must be");
+  } else if (collector_state()->in_mixed_phase()) {
+    assert(!collector_state()->in_initial_mark_gc(), "must be");
+    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
     return MixedGC;
   } else {
-    assert(collector_state()->last_gc_was_young(), "must be");
-    assert(!collector_state()->during_initial_mark_pause(), "must be");
-    assert(!collector_state()->last_young_gc(), "must be");
+    assert(!collector_state()->in_initial_mark_gc(), "must be");
+    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
     return YoungOnlyGC;
   }
 }
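
With the renamed collector-state queries, young_gc_pause_kind() above becomes a
pure function of the phase flags, tested in priority order. A stand-alone
restatement of that classification (CollectorStateView is a hypothetical
stand-in for G1CollectorState):

    #include <cassert>
    #include <cstdio>

    enum PauseKind { YoungOnlyGC, LastYoungGC, InitialMarkGC, MixedGC };

    // Reduced, hypothetical view of the flags young_gc_pause_kind() consults.
    struct CollectorStateView {
      bool in_initial_mark_gc;
      bool in_young_gc_before_mixed;
      bool in_mixed_phase;
    };

    PauseKind young_gc_pause_kind(const CollectorStateView& s) {
      if (s.in_initial_mark_gc) {
        assert(!s.in_young_gc_before_mixed);  // mirrors the assert above
        return InitialMarkGC;                 // pause that starts a marking cycle
      }
      if (s.in_young_gc_before_mixed) {
        return LastYoungGC;                   // final young-only pause before mixed gcs
      }
      if (s.in_mixed_phase) {
        return MixedGC;                       // also evacuates old gen regions
      }
      return YoungOnlyGC;                     // plain young collection
    }

    int main() {
      CollectorStateView s = { false, true, false };
      printf("kind=%d\n", young_gc_pause_kind(s));  // prints kind=1 (LastYoungGC)
      return 0;
    }
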
--- a/src/hotspot/share/gc/g1/g1Policy.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -30,6 +30,7 @@
 #include "gc/g1/g1InCSetState.hpp"
 #include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
+#include "gc/g1/g1RemSetTrackingPolicy.hpp"
 #include "gc/g1/g1Predictions.hpp"
 #include "gc/g1/g1YoungGenSizer.hpp"
 #include "gc/shared/gcCause.hpp"
@@ -57,11 +58,13 @@
   // Update the IHOP control with necessary statistics.
   void update_ihop_prediction(double mutator_time_s,
                               size_t mutator_alloc_bytes,
-                              size_t young_gen_size);
+                              size_t young_gen_size,
+                              bool this_gc_was_young_only);
   void report_ihop_statistics();
 
   G1Predictions _predictor;
   G1Analytics* _analytics;
+  G1RemSetTrackingPolicy _remset_tracker;
   G1MMUTracker* _mmu_tracker;
   G1IHOPControl* _ihop_control;
 
@@ -103,10 +106,16 @@
   size_t _bytes_allocated_in_old_since_last_gc;
 
   G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
+
+  bool should_update_surv_rate_group_predictors() {
+    return collector_state()->in_young_only_phase() && !collector_state()->mark_or_rebuild_in_progress();
+  }
 public:
   const G1Predictions& predictor() const { return _predictor; }
   const G1Analytics* analytics()   const { return const_cast<const G1Analytics*>(_analytics); }
 
+  G1RemSetTrackingPolicy* remset_tracker() { return &_remset_tracker; }
+
   // Add the given number of bytes to the total number of allocated bytes in the old gen.
   void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
 
@@ -132,10 +141,6 @@
 
   double predict_survivor_regions_evac_time() const;
 
-  bool should_update_surv_rate_group_predictors() {
-    return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
-  }
-
   void cset_regions_freed() {
     bool update = should_update_surv_rate_group_predictors();
 
@@ -254,6 +259,7 @@
   jlong collection_pause_end_millis() { return _collection_pause_end_millis; }
 
 private:
+  void clear_collection_set_candidates();
   // Sets up marking if proper conditions are met.
   void maybe_start_marking();
 
@@ -318,7 +324,6 @@
   // Record start, end, and completion of cleanup.
   void record_concurrent_mark_cleanup_start();
   void record_concurrent_mark_cleanup_end();
-  void record_concurrent_mark_cleanup_completed();
 
   void print_phases();
 
@@ -354,7 +359,7 @@
   // has to be the first thing that the pause does). If
   // initiate_conc_mark_if_possible() is true, and the concurrent
   // marking thread has completed its work during the previous cycle,
-  // it will set during_initial_mark_pause() to so that the pause does
+  // it will set in_initial_mark_gc() so that the pause does
   // the initial-mark work and start a marking cycle.
   void decide_on_conc_mark_initiation();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
+#include "memory/allocation.inline.hpp"
+
+G1RegionMarkStatsCache::G1RegionMarkStatsCache(G1RegionMarkStats* target, uint max_regions, uint num_cache_entries) :
+  _num_stats(max_regions),
+  _target(target),
+  _num_cache_entries(num_cache_entries),
+  _cache_hits(0),
+  _cache_misses(0) {
+
+  guarantee(is_power_of_2(num_cache_entries),
+            "Number of cache entries must be power of two, but is %u", num_cache_entries);
+  _cache = NEW_C_HEAP_ARRAY(G1RegionMarkStatsCacheEntry, _num_cache_entries, mtGC);
+  for (uint i = 0; i < _num_cache_entries; i++) {
+    _cache[i].clear();
+  }
+  _num_cache_entries_mask = _num_cache_entries - 1;
+}
+
+G1RegionMarkStatsCache::~G1RegionMarkStatsCache() {
+  FREE_C_HEAP_ARRAY(G1RegionMarkStatsCacheEntry, _cache);
+}
+
+// Evict all remaining statistics, returning cache hits and misses.
+Pair<size_t, size_t> G1RegionMarkStatsCache::evict_all() {
+  for (uint i = 0; i < _num_cache_entries; i++) {
+    evict(i);
+  }
+  return Pair<size_t,size_t>(_cache_hits, _cache_misses);
+}
+
+// Reset all cache entries to their default values.
+void G1RegionMarkStatsCache::reset() {
+  _cache_hits = 0;
+  _cache_misses = 0;
+
+  for (uint i = 0; i < _num_cache_entries; i++) {
+    _cache[i].clear();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1REGIONMARKSTATSCACHE_HPP
+#define SHARE_VM_GC_G1_G1REGIONMARKSTATSCACHE_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/pair.hpp"
+
+// Per-Region statistics gathered during marking.
+//
+// This includes
+// * the number of live words gathered during marking for the area from bottom
+// to ntams. This is an exact measure.
+// The code corrects later for the live data between ntams and top.
+struct G1RegionMarkStats {
+  size_t _live_words;
+
+  // Clear all members.
+  void clear() {
+    _live_words = 0;
+  }
+  // Clear all members after a marking overflow. Nothing to do as the live words
+  // are updated by the atomic mark. We do not remark objects after overflow.
+  void clear_during_overflow() {
+  }
+
+  bool is_clear() const { return _live_words == 0; }
+};
+
+// Per-marking thread cache for the region mark statistics.
+//
+// Each cache is a largish map of region-idx -> G1RegionMarkStats entries that cache
+// currently gathered statistics; entries are evicted to the global statistics array
+// on every collision. This minimizes synchronization overhead which would be required
+// every time statistics change, as marking is very localized.
+// The map entry number is a power of two to allow simple and fast hashing using
+// logical and.
+class G1RegionMarkStatsCache {
+private:
+  // The array of statistics entries to evict to; the global array.
+  G1RegionMarkStats* _target;
+  // Number of entries in the eviction target.
+  uint _num_stats;
+
+  // An entry of the statistics cache.
+  struct G1RegionMarkStatsCacheEntry {
+    uint _region_idx;
+    G1RegionMarkStats _stats;
+
+    void clear() {
+      _region_idx = 0;
+      _stats.clear();
+    }
+
+    bool is_clear() const {
+      return _region_idx == 0 && _stats.is_clear();
+    }
+  };
+
+  // The actual cache and its number of entries.
+  G1RegionMarkStatsCacheEntry* _cache;
+  uint _num_cache_entries;
+
+  // Cache hits/miss counters.
+  size_t _cache_hits;
+  size_t _cache_misses;
+
+  // Evict a given element of the statistics cache.
+  void evict(uint idx);
+
+  size_t _num_cache_entries_mask;
+
+  uint hash(uint idx) {
+    return idx & _num_cache_entries_mask;
+  }
+
+  G1RegionMarkStatsCacheEntry* find_for_add(uint region_idx);
+public:
+  G1RegionMarkStatsCache(G1RegionMarkStats* target, uint max_regions, uint num_cache_entries);
+
+  ~G1RegionMarkStatsCache();
+
+  void add_live_words(uint region_idx, size_t live_words) {
+    G1RegionMarkStatsCacheEntry* const cur = find_for_add(region_idx);
+    cur->_stats._live_words += live_words;
+  }
+
+  void reset(uint region_idx) {
+    uint const cache_idx = hash(region_idx);
+    G1RegionMarkStatsCacheEntry* cur = &_cache[cache_idx];
+    if (cur->_region_idx == region_idx) {
+      _cache[cache_idx].clear();
+    }
+  }
+
+  // Evict all remaining statistics, returning cache hits and misses.
+  Pair<size_t, size_t> evict_all();
+
+  // Reset all cache entries to their default values.
+  void reset();
+
+  size_t hits() const { return _cache_hits; }
+  size_t misses() const { return _cache_misses; }
+};
+
+#endif // SHARE_VM_GC_G1_G1REGIONMARKSTATSCACHE_HPP
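
The cache declared above is direct-mapped: a region index hashes to a slot with a
single logical-and against the power-of-two mask, and a colliding slot is flushed
into the global statistics array before being reused. A self-contained,
single-threaded sketch of that scheme (simplified; not the HotSpot code, and like
the real cache it treats key 0 as the "clear" value, which is harmless because a
cleared entry flushes zero):

    #include <cassert>
    #include <cstdio>
    #include <vector>

    struct Entry { unsigned key; size_t value; };

    class DirectMappedCache {
      std::vector<Entry>   _cache;
      std::vector<size_t>& _target;  // global per-region array entries flush into
      unsigned             _mask;    // entries - 1; entry count is a power of two
    public:
      DirectMappedCache(std::vector<size_t>& target, unsigned entries)
        : _cache(entries, Entry()), _target(target), _mask(entries - 1) {
        assert((entries & (entries - 1)) == 0 && "entry count must be a power of two");
      }
      void add(unsigned key, size_t delta) {
        Entry& e = _cache[key & _mask];  // hashing is a single logical-and
        if (e.key != key) {              // collision: evict the old entry first
          _target[e.key] += e.value;
          e.key = key;
          e.value = 0;
        }
        e.value += delta;
      }
      void evict_all() {                 // flush everything, e.g. at end of marking
        for (size_t i = 0; i < _cache.size(); i++) {
          _target[_cache[i].key] += _cache[i].value;
          _cache[i] = Entry();
        }
      }
    };

    int main() {
      std::vector<size_t> live_words(1024, 0);
      DirectMappedCache cache(live_words, 256);
      cache.add(17, 100);      // repeated hits to one slot cause no global traffic
      cache.add(17, 50);
      cache.add(17 + 256, 8);  // collides with region 17 and evicts it first
      cache.evict_all();
      printf("%zu %zu\n", live_words[17], live_words[17 + 256]);  // prints 150 8
      return 0;
    }
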
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1REGIONMARKSTATSCACHE_INLINE_HPP
+#define SHARE_VM_GC_G1_G1REGIONMARKSTATSCACHE_INLINE_HPP
+
+#include "gc/g1/g1RegionMarkStatsCache.hpp"
+#include "runtime/atomic.hpp"
+
+inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCache::find_for_add(uint region_idx) {
+  uint const cache_idx = hash(region_idx);
+
+  G1RegionMarkStatsCacheEntry* cur = &_cache[cache_idx];
+  if (cur->_region_idx != region_idx) {
+    evict(cache_idx);
+    cur->_region_idx = region_idx;
+    _cache_misses++;
+  } else {
+    _cache_hits++;
+  }
+
+  return cur;
+}
+
+inline void G1RegionMarkStatsCache::evict(uint idx) {
+  G1RegionMarkStatsCacheEntry* cur = &_cache[idx];
+  if (cur->_stats._live_words != 0) {
+    Atomic::add(cur->_stats._live_words, &_target[cur->_region_idx]._live_words);
+  }
+  cur->clear();
+}
+
+#endif // SHARE_VM_GC_G1_G1REGIONMARKSTATSCACHE_INLINE_HPP
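
Eviction above publishes a thread-local count into the shared per-region array
with a single atomic add, so marking threads never take a lock. A minimal sketch
of that flush using std::atomic (the real code uses HotSpot's Atomic::add):

    #include <atomic>
    #include <cstdio>

    // Shared per-region totals; zero-initialized because of static storage.
    static std::atomic<size_t> g_live_words[4];

    // Flush one cached (region, count) pair; skip the atomic when nothing accumulated.
    void evict_entry(unsigned region_idx, size_t cached_words) {
      if (cached_words != 0) {
        g_live_words[region_idx].fetch_add(cached_words, std::memory_order_relaxed);
      }
    }

    int main() {
      evict_entry(2, 128);
      evict_entry(2, 64);
      printf("%zu\n", g_live_words[2].load());  // prints 192
      return 0;
    }
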
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -40,11 +40,13 @@
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/align.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/intHisto.hpp"
 #include "utilities/stack.inline.hpp"
+#include "utilities/ticks.inline.hpp"
 
 // Collects information about the overall remembered set scan progress during an evacuation.
 class G1RemSetScanState : public CHeapObj<mtGC> {
@@ -74,8 +76,6 @@
     static size_t chunk_size() { return M; }
 
     void work(uint worker_id) {
-      G1CardTable* ct = _g1h->card_table();
-
       while (_cur_dirty_regions < _num_dirty_regions) {
         size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
         size_t max = MIN2(next + _chunk_length, _num_dirty_regions);
@@ -83,7 +83,7 @@
         for (size_t i = next; i < max; i++) {
           HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
           if (!r->is_survivor()) {
-            ct->clear(MemRegion(r->bottom(), r->end()));
+            r->clear_cardtable();
           }
         }
       }
@@ -271,9 +271,6 @@
     workers->run_task(&cl, num_workers);
 
 #ifndef PRODUCT
-    // Need to synchronize with concurrent cleanup since it needs to
-    // finish its card table clearing before we can verify.
-    G1CollectedHeap::heap()->wait_while_free_regions_coming();
     G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
 #endif
   }
@@ -298,20 +295,12 @@
 }
 
 uint G1RemSet::num_par_rem_sets() {
-  return MAX2(DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads(), ParallelGCThreads);
+  return DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
 }
 
 void G1RemSet::initialize(size_t capacity, uint max_regions) {
   G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
   _scan_state->initialize(max_regions);
-  {
-    GCTraceTime(Debug, gc, marking)("Initialize Card Live Data");
-    _card_live_data.initialize(capacity, max_regions);
-  }
-  if (G1PretouchAuxiliaryMemory) {
-    GCTraceTime(Debug, gc, marking)("Pre-Touch Card Live Data");
-    _card_live_data.pretouch();
-  }
 }
 
 G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
@@ -514,27 +503,6 @@
   phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
 }
 
-class G1ScrubRSClosure: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  G1CardLiveData* _live_data;
-public:
-  G1ScrubRSClosure(G1CardLiveData* live_data) :
-    _g1h(G1CollectedHeap::heap()),
-    _live_data(live_data) { }
-
-  bool do_heap_region(HeapRegion* r) {
-    if (!r->is_continues_humongous()) {
-      r->rem_set()->scrub(_live_data);
-    }
-    return false;
-  }
-};
-
-void G1RemSet::scrub(uint worker_num, HeapRegionClaimer *hrclaimer) {
-  G1ScrubRSClosure scrub_cl(&_card_live_data);
-  _g1->heap_region_par_iterate_from_worker_offset(&scrub_cl, hrclaimer, worker_num);
-}
-
 inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
 #ifdef ASSERT
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
@@ -750,24 +718,267 @@
   }
 }
 
-void G1RemSet::create_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) {
-  _card_live_data.create(workers, mark_bitmap);
-}
+class G1RebuildRemSetTask: public AbstractGangTask {
+  // Aggregate the counting data that was constructed concurrently
+  // with marking.
+  class G1RebuildRemSetHeapRegionClosure : public HeapRegionClosure {
+    G1ConcurrentMark* _cm;
+    G1RebuildRemSetClosure _update_cl;
+
+    // Applies _update_cl to the references of the given object, limiting objArrays
+    // to the given MemRegion. Returns the amount of words actually scanned.
+    size_t scan_for_references(oop const obj, MemRegion mr) {
+      size_t const obj_size = obj->size();
+      // All non-objArrays and objArrays completely within the mr
+      // can be scanned without passing the mr.
+      if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) {
+        obj->oop_iterate(&_update_cl);
+        return obj_size;
+      }
+      // This path is for objArrays crossing the given MemRegion. Only scan the
+      // area within the MemRegion.
+      obj->oop_iterate(&_update_cl, mr);
+      return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size();
+    }
+
+    // A humongous object is live (with respect to the scanning) if either
+    // a) it is marked on the bitmap as such, or
+    // b) its TARS is larger than TAMS, i.e. it has been allocated during marking.
+    bool is_humongous_live(oop const humongous_obj, const G1CMBitMap* const bitmap, HeapWord* tams, HeapWord* tars) const {
+      return bitmap->is_marked(humongous_obj) || (tars > tams);
+    }
+
+    // Iterator over the live objects within the given MemRegion.
+    class LiveObjIterator : public StackObj {
+      const G1CMBitMap* const _bitmap;
+      const HeapWord* _tams;
+      const MemRegion _mr;
+      HeapWord* _current;
+
+      bool is_below_tams() const {
+        return _current < _tams;
+      }
+
+      bool is_live(HeapWord* obj) const {
+        return !is_below_tams() || _bitmap->is_marked(obj);
+      }
+
+      HeapWord* bitmap_limit() const {
+        return MIN2(const_cast<HeapWord*>(_tams), _mr.end());
+      }
+
+      void move_if_below_tams() {
+        if (is_below_tams() && has_next()) {
+          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
+        }
+      }
+    public:
+      LiveObjIterator(const G1CMBitMap* const bitmap, const HeapWord* tams, const MemRegion mr, HeapWord* first_oop_into_mr) :
+          _bitmap(bitmap),
+          _tams(tams),
+          _mr(mr),
+          _current(first_oop_into_mr) {
+
+        assert(_current <= _mr.start(),
+               "First oop " PTR_FORMAT " should extend into mr [" PTR_FORMAT ", " PTR_FORMAT ")",
+               p2i(first_oop_into_mr), p2i(mr.start()), p2i(mr.end()));
+
+        // Step to the next live object within the MemRegion if needed.
+        if (is_live(_current)) {
+          // Non-objArrays were scanned by the previous part of that region.
+          if (_current < mr.start() && !oop(_current)->is_objArray()) {
+            _current += oop(_current)->size();
+            // We might have positioned _current on a non-live object. Reposition to the next
+            // live one if needed.
+            move_if_below_tams();
+          }
+        } else {
+          // The object at _current can only be dead if below TAMS, so we can use
+          // the bitmap immediately.
+          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
+          assert(_current == _mr.end() || is_live(_current),
+                 "Current " PTR_FORMAT " should be live (%s) or beyond the end of the MemRegion (" PTR_FORMAT ")",
+                 p2i(_current), BOOL_TO_STR(is_live(_current)), p2i(_mr.end()));
+        }
+      }
+
+      void move_to_next() {
+        _current += next()->size();
+        move_if_below_tams();
+      }
+
+      oop next() const {
+        oop result = oop(_current);
+        assert(is_live(_current),
+               "Object " PTR_FORMAT " must be live TAMS " PTR_FORMAT " below %d mr " PTR_FORMAT " " PTR_FORMAT " outside %d",
+               p2i(_current), p2i(_tams), _tams > _current, p2i(_mr.start()), p2i(_mr.end()), _mr.contains(result));
+        return result;
+      }
+
+      bool has_next() const {
+        return _current < _mr.end();
+      }
+    };
+
+    // Rebuild remembered sets in the part of the region specified by mr and hr.
+    // Objects between the bottom of the region and the TAMS are checked for liveness
+    // using the given bitmap. Objects between TAMS and TARS are assumed to be live.
+    // Returns the number of live words between bottom and TAMS.
+    size_t rebuild_rem_set_in_region(const G1CMBitMap* const bitmap,
+                                     HeapWord* const top_at_mark_start,
+                                     HeapWord* const top_at_rebuild_start,
+                                     HeapRegion* hr,
+                                     MemRegion mr) {
+      size_t marked_words = 0;
+
+      if (hr->is_humongous()) {
+        oop const humongous_obj = oop(hr->humongous_start_region()->bottom());
+        if (is_humongous_live(humongous_obj, bitmap, top_at_mark_start, top_at_rebuild_start)) {
+          // We need to scan both [bottom, TAMS) and [TAMS, top_at_rebuild_start);
+          // however in case of humongous objects it is sufficient to scan the encompassing
+          // area (top_at_rebuild_start is always larger or equal to TAMS) as one of the
+          // two areas will be zero sized. I.e. TAMS is either
+          // the same as bottom or top(_at_rebuild_start). There is no way TAMS has a different
+          // value: this would mean that TAMS points somewhere into the object.
+          assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
+                 "More than one object in the humongous region?");
+          humongous_obj->oop_iterate(&_update_cl, mr);
+          return top_at_mark_start != hr->bottom() ? mr.byte_size() : 0;
+        } else {
+          return 0;
+        }
+      }
 
-void G1RemSet::finalize_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) {
-  _card_live_data.finalize(workers, mark_bitmap);
-}
+      for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
+        oop obj = it.next();
+        size_t scanned_size = scan_for_references(obj, mr);
+        if ((HeapWord*)obj < top_at_mark_start) {
+          marked_words += scanned_size;
+        }
+      }
+
+      return marked_words * HeapWordSize;
+    }
+public:
+  G1RebuildRemSetHeapRegionClosure(G1CollectedHeap* g1h,
+                                   G1ConcurrentMark* cm,
+                                   uint worker_id) :
+    HeapRegionClosure(),
+    _cm(cm),
+    _update_cl(g1h, worker_id) { }
+
+    bool do_heap_region(HeapRegion* hr) {
+      if (_cm->has_aborted()) {
+        return true;
+      }
+
+      uint const region_idx = hr->hrm_index();
+      DEBUG_ONLY(HeapWord* const top_at_rebuild_start_check = _cm->top_at_rebuild_start(region_idx);)
+      assert(top_at_rebuild_start_check == NULL ||
+             top_at_rebuild_start_check > hr->bottom(),
+             "A TARS (" PTR_FORMAT ") == bottom() (" PTR_FORMAT ") indicates the old region %u is empty (%s)",
+             p2i(top_at_rebuild_start_check), p2i(hr->bottom()),  region_idx, hr->get_type_str());
+
+      size_t total_marked_bytes = 0;
+      size_t const chunk_size_in_words = G1RebuildRemSetChunkSize / HeapWordSize;
+
+      HeapWord* const top_at_mark_start = hr->next_top_at_mark_start();
+
+      HeapWord* cur = hr->bottom();
+      while (cur < hr->end()) {
+        // After every iteration (yield point) we need to check whether the region's
+        // TARS changed due to e.g. eager reclaim.
+        HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);
+        if (top_at_rebuild_start == NULL) {
+          return false;
+        }
+
+        MemRegion next_chunk = MemRegion(hr->bottom(), top_at_rebuild_start).intersection(MemRegion(cur, chunk_size_in_words));
+        if (next_chunk.is_empty()) {
+          break;
+        }
+
+        const Ticks start = Ticks::now();
+        size_t marked_bytes = rebuild_rem_set_in_region(_cm->next_mark_bitmap(),
+                                                        top_at_mark_start,
+                                                        top_at_rebuild_start,
+                                                        hr,
+                                                        next_chunk);
+        Tickspan time = Ticks::now() - start;
 
-void G1RemSet::verify_card_live_data(WorkGang* workers, G1CMBitMap* bitmap) {
-  _card_live_data.verify(workers, bitmap);
+        log_trace(gc, remset, tracking)("Rebuilt region %u "
+                                        "live " SIZE_FORMAT " "
+                                        "time %.3fms "
+                                        "marked bytes " SIZE_FORMAT " "
+                                        "bot " PTR_FORMAT " "
+                                        "TAMS " PTR_FORMAT " "
+                                        "TARS " PTR_FORMAT,
+                                        region_idx,
+                                        _cm->liveness(region_idx) * HeapWordSize,
+                                        TicksToTimeHelper::seconds(time) * 1000.0,
+                                        marked_bytes,
+                                        p2i(hr->bottom()),
+                                        p2i(top_at_mark_start),
+                                        p2i(top_at_rebuild_start));
+
+        if (marked_bytes > 0) {
+          hr->add_to_marked_bytes(marked_bytes);
+          total_marked_bytes += marked_bytes;
+        }
+        cur += chunk_size_in_words;
+
+        _cm->do_yield_check();
+        if (_cm->has_aborted()) {
+          return true;
+        }
+      }
+      // In the final iteration of the loop the region might have been eagerly reclaimed.
+      // Simply filter out those regions. We cannot just use region type because there
+      // might have already been new allocations into these regions.
+      DEBUG_ONLY(HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);)
+      assert(!hr->is_old() ||
+             top_at_rebuild_start == NULL ||
+             total_marked_bytes == _cm->liveness(region_idx) * HeapWordSize,
+             "Marked bytes " SIZE_FORMAT " for region %u (%s) in [bottom, TAMS) do not match liveness during mark " SIZE_FORMAT " "
+             "(" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ")",
+             total_marked_bytes, hr->hrm_index(), hr->get_type_str(), _cm->liveness(region_idx) * HeapWordSize,
+             p2i(hr->bottom()), p2i(top_at_mark_start), p2i(top_at_rebuild_start));
+      // Abort state may have changed after the yield check.
+      return _cm->has_aborted();
+    }
+  };
+
+  HeapRegionClaimer _hr_claimer;
+  G1ConcurrentMark* _cm;
+
+  uint _worker_id_offset;
+public:
+  G1RebuildRemSetTask(G1ConcurrentMark* cm,
+                      uint n_workers,
+                      uint worker_id_offset) :
+      AbstractGangTask("G1 Rebuild Remembered Set"),
+      _cm(cm),
+      _hr_claimer(n_workers),
+      _worker_id_offset(worker_id_offset) {
+  }
+
+  void work(uint worker_id) {
+    SuspendibleThreadSetJoiner sts_join;
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    G1RebuildRemSetHeapRegionClosure cl(g1h, _cm, _worker_id_offset + worker_id);
+    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
+  }
+};
+
+void G1RemSet::rebuild_rem_set(G1ConcurrentMark* cm,
+                               WorkGang* workers,
+                               uint worker_id_offset) {
+  uint num_workers = workers->active_workers();
+
+  G1RebuildRemSetTask cl(cm,
+                         num_workers,
+                         worker_id_offset);
+  workers->run_task(&cl, num_workers);
 }
-
-void G1RemSet::clear_card_live_data(WorkGang* workers) {
-  _card_live_data.clear(workers);
-}
-
-#ifdef ASSERT
-void G1RemSet::verify_card_live_data_is_clear() {
-  _card_live_data.verify_is_clear();
-}
-#endif
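
G1RebuildRemSetHeapRegionClosure above walks each region in fixed
G1RebuildRemSetChunkSize chunks and re-reads the region's top_at_rebuild_start
after every yield point, because eager reclaim can retract it concurrently. A
self-contained sketch of that loop shape, with hypothetical stand-in helpers so
it compiles:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-ins for the G1 hooks the real closure uses.
    static size_t g_tars = 700;                                 // TARS analogue
    static size_t read_top_at_rebuild_start() { return g_tars; }
    static bool   yield_and_check_abort()     { return false; }
    static size_t scan_chunk(size_t begin, size_t end) {        // words scanned
      return end - begin;
    }

    // Scan [bottom, end) in chunks, honoring a limit that may move between chunks.
    static size_t rebuild_region(size_t bottom, size_t end, size_t chunk) {
      size_t scanned = 0;
      for (size_t cur = bottom; cur < end; cur += chunk) {
        size_t limit = read_top_at_rebuild_start();  // re-read after each yield
        size_t chunk_end = std::min(cur + chunk, std::min(limit, end));
        if (cur >= chunk_end) {
          break;                                     // nothing left below the limit
        }
        scanned += scan_chunk(cur, chunk_end);
        if (yield_and_check_abort()) {
          break;                                     // concurrent mark aborted
        }
      }
      return scanned;
    }

    int main() {
      printf("scanned %zu words\n", rebuild_region(0, 1024, 128));  // limited by TARS
      return 0;
    }
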
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,8 +26,8 @@
 #define SHARE_VM_GC_G1_G1REMSET_HPP
 
 #include "gc/g1/dirtyCardQueue.hpp"
-#include "gc/g1/g1CardLiveData.hpp"
 #include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1OopClosures.hpp"
 #include "gc/g1/g1RemSetSummary.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "memory/allocation.hpp"
@@ -41,6 +41,7 @@
 class G1BlockOffsetTable;
 class CodeBlobClosure;
 class G1CollectedHeap;
+class G1CMBitMap;
 class G1HotCardCache;
 class G1RemSetScanState;
 class G1ParScanThreadState;
@@ -55,7 +56,6 @@
 class G1RemSet: public CHeapObj<mtGC> {
 private:
   G1RemSetScanState* _scan_state;
-  G1CardLiveData _card_live_data;
 
   G1RemSetSummary _prev_period_summary;
 
@@ -114,9 +114,6 @@
 
   G1RemSetScanState* scan_state() const { return _scan_state; }
 
-  // Eliminates any remembered set entries that correspond to dead heap ranges.
-  void scrub(uint worker_num, HeapRegionClaimer* hrclaimer);
-
   // Refine the card corresponding to "card_ptr". Safe to be called concurrently
   // to the mutator.
   void refine_card_concurrently(jbyte* card_ptr,
@@ -135,18 +132,9 @@
 
   size_t num_conc_refined_cards() const { return _num_conc_refined_cards; }
 
-  void create_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap);
-  void finalize_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap);
-
-  // Verify that the liveness count data created concurrently matches one created
-  // during this safepoint.
-  void verify_card_live_data(WorkGang* workers, G1CMBitMap* actual_bitmap);
-
-  void clear_card_live_data(WorkGang* workers);
-
-#ifdef ASSERT
-  void verify_card_live_data_is_clear();
-#endif
+  // Rebuilds the remembered set by scanning from bottom to TARS for all regions
+  // using the given work gang.
+  void rebuild_rem_set(G1ConcurrentMark* cm, WorkGang* workers, uint worker_id_offset);
 };
 
 class G1ScanRSForRegionClosure : public HeapRegionClosure {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/collectionSetChooser.hpp"
+#include "gc/g1/g1RemSetTrackingPolicy.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "runtime/safepoint.hpp"
+
+bool G1RemSetTrackingPolicy::is_interesting_humongous_region(HeapRegion* r) const {
+  return r->is_starts_humongous() && oop(r->bottom())->is_typeArray();
+}
+
+bool G1RemSetTrackingPolicy::needs_scan_for_rebuild(HeapRegion* r) const {
+  // All non-free, non-young, non-closed-archive regions need to be scanned for references:
+  // references from young regions to other regions are gathered at every gc, and closed
+  // archive regions by definition do not have references going outside the closed archive.
+  // Free regions trivially do not need scanning because they do not contain live
+  // objects.
+  return !(r->is_young() || r->is_closed_archive() || r->is_free());
+}
+
+void G1RemSetTrackingPolicy::update_at_allocate(HeapRegion* r) {
+  if (r->is_young()) {
+    // Always collect remembered set for young regions.
+    r->rem_set()->set_state_complete();
+  } else if (r->is_humongous()) {
+    // Collect remembered sets for humongous regions by default to allow eager reclaim.
+    r->rem_set()->set_state_complete();
+  } else if (r->is_archive()) {
+    // Archive regions never move, so never build remembered sets for them.
+    r->rem_set()->set_state_empty();
+  } else if (r->is_old()) {
+    // By default, do not create remembered set for new old regions.
+    r->rem_set()->set_state_empty();
+  } else {
+    guarantee(false, "Unhandled region %u with heap region type %s", r->hrm_index(), r->get_type_str());
+  }
+}
+
+void G1RemSetTrackingPolicy::update_at_free(HeapRegion* r) {
+  /* nothing to do */
+}
+
+bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_bytes) {
+  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
+
+  bool selected_for_rebuild = false;
+
+  // Only consider updating the remembered set for old gen regions - excluding archive regions
+  // which never move (but are "Old" regions).
+  if (r->is_old_or_humongous() && !r->is_archive()) {
+    size_t between_ntams_and_top = (r->top() - r->next_top_at_mark_start()) * HeapWordSize;
+    size_t total_live_bytes = live_bytes + between_ntams_and_top;
+    // Completely free regions after rebuild are of no interest wrt rebuilding the
+    // remembered set.
+    assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());
+    // To be of interest for rebuilding the remembered set the following must apply:
+    // - They must contain some live data.
+    // - We always try to update the remembered sets of humongous regions containing
+    // type arrays if they are empty as they might have been reset after full gc.
+    // - Only need to rebuild non-complete remembered sets.
+    // - Otherwise only add those old gen regions whose occupancy is low enough that there
+    // is a chance that we will ever evacuate them in the mixed gcs.
+    if ((total_live_bytes > 0) &&
+        (is_interesting_humongous_region(r) || CollectionSetChooser::region_occupancy_low_enough_for_evac(total_live_bytes)) &&
+        !r->rem_set()->is_tracked()) {
+
+      r->rem_set()->set_state_updating();
+      selected_for_rebuild = true;
+    }
+    log_trace(gc, remset, tracking)("Before rebuild region %u "
+                                    "(ntams: " PTR_FORMAT ") "
+                                    "total_live_bytes " SIZE_FORMAT " "
+                                    "selected %s "
+                                    "(live_bytes " SIZE_FORMAT " "
+                                    "next_marked " SIZE_FORMAT " "
+                                    "marked " SIZE_FORMAT " "
+                                    "type %s)",
+                                    r->hrm_index(),
+                                    p2i(r->next_top_at_mark_start()),
+                                    total_live_bytes,
+                                    BOOL_TO_STR(selected_for_rebuild),
+                                    live_bytes,
+                                    r->next_marked_bytes(),
+                                    r->marked_bytes(),
+                                    r->get_type_str());
+  }
+
+  return selected_for_rebuild;
+}
+
+void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
+  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
+
+  if (r->is_old_or_humongous()) {
+    if (r->rem_set()->is_updating()) {
+      r->rem_set()->set_state_complete();
+    }
+    // We can drop the remembered set of a humongous region if it has grown too large:
+    // we will never try to eagerly reclaim or move such a region anyway until the next
+    // concurrent cycle, as remembered set entries will keep being added to it.
+    if (r->is_humongous() && !G1CollectedHeap::heap()->is_potential_eager_reclaim_candidate(r)) {
+      r->rem_set()->clear_locked(true /* only_cardset */);
+    }
+    assert(!r->is_continues_humongous() || r->rem_set()->is_empty(), "Continues humongous object remsets should be empty");
+    G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
+    log_trace(gc, remset, tracking)("After rebuild region %u "
+                                    "(ntams " PTR_FORMAT " "
+                                    "liveness " SIZE_FORMAT " "
+                                    "next_marked_bytes " SIZE_FORMAT " "
+                                    "remset occ " SIZE_FORMAT " "
+                                    "size " SIZE_FORMAT ")",
+                                    r->hrm_index(),
+                                    p2i(r->next_top_at_mark_start()),
+                                    cm->liveness(r->hrm_index()) * HeapWordSize,
+                                    r->next_marked_bytes(),
+                                    r->rem_set()->occupied_locked(),
+                                    r->rem_set()->mem_size());
+  }
+}
+
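Note: the heart of update_before_rebuild above is a small predicate over live bytes. A standalone sketch of that arithmetic (plain C++; the word size, region size, and occupancy cutoff are illustrative assumptions, not the actual G1 values from CollectionSetChooser):

    #include <cstddef>
    #include <cstdio>

    const size_t kHeapWordSize  = 8;                       // assumption: 64-bit heap words
    const size_t kRegionSize    = 1024 * 1024;             // assumption: 1M regions
    const size_t kLiveThreshold = kRegionSize * 85 / 100;  // assumption: 85% occupancy cutoff

    struct Region {
      size_t live_bytes;            // marked live bytes below ntams
      size_t words_above_ntams;     // (top - ntams) in words, live by definition
      bool   interesting_humongous; // humongous region holding a type array
      bool   tracked;               // remembered set already being tracked
    };

    // Mirrors the selection predicate in G1RemSetTrackingPolicy::update_before_rebuild:
    // some live data, either an interesting humongous region or cheap enough to
    // evacuate, and not already tracked.
    bool select_for_rebuild(const Region& r) {
      size_t total_live = r.live_bytes + r.words_above_ntams * kHeapWordSize;
      bool cheap_to_evac = total_live <= kLiveThreshold;
      return total_live > 0 &&
             (r.interesting_humongous || cheap_to_evac) &&
             !r.tracked;
    }

    int main() {
      Region r = {512 * 1024, 1024, false, false};
      printf("selected: %d\n", select_for_rebuild(r));
      return 0;
    }
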
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1REMSETTRACKINGPOLICY_HPP
+#define SHARE_VM_GC_G1_G1REMSETTRACKINGPOLICY_HPP
+
+#include "gc/g1/heapRegion.hpp"
+#include "gc/g1/heapRegionType.hpp"
+#include "memory/allocation.hpp"
+
+// The remembered set tracking policy determines for a given region the state of
+// the remembered set, i.e. when it should be tracked, and if/when the remembered
+// set is complete.
+class G1RemSetTrackingPolicy : public CHeapObj<mtGC> {
+private:
+  // Is the given region an interesting humongous region to start remembered set tracking
+  // for?
+  bool is_interesting_humongous_region(HeapRegion* r) const;
+public:
+  // Do we need to scan the given region to get all outgoing references for remembered
+  // set rebuild?
+  bool needs_scan_for_rebuild(HeapRegion* r) const;
+  // Update remembered set tracking state at allocation of the region. May be
+  // called at any time. The caller makes sure that the changes to the remembered
+  // set state are visible to other threads.
+  void update_at_allocate(HeapRegion* r);
+  // Update remembered set tracking state before we are going to rebuild remembered
+  // sets. Called at safepoint in the remark pause.
+  bool update_before_rebuild(HeapRegion* r, size_t live_bytes);
+  // Update remembered set tracking state after rebuild is complete, i.e. the cleanup
+  // pause. Called at safepoint.
+  void update_after_rebuild(HeapRegion* r);
+  // Update remembered set tracking state when the region is freed.
+  void update_at_free(HeapRegion* r);
+};
+
+#endif /* SHARE_VM_GC_G1_G1REMSETTRACKINGPOLICY_HPP */
+
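The header above declares four lifecycle hooks. A toy driver (plain C++ with stand-in types, not HotSpot code; printf stands in for the real state changes) showing the order in which a region passes through them:

    #include <cstdio>

    struct HeapRegion { const char* name; }; // stand-in for gc/g1/heapRegion.hpp

    struct TrackingPolicyModel {
      // May run at any time, concurrently with mutators.
      void update_at_allocate(HeapRegion* r) {
        printf("%s: allocated, pick initial remset state\n", r->name);
      }
      // Remark safepoint: decide Untracked -> Updating.
      bool update_before_rebuild(HeapRegion* r, unsigned long live_bytes) {
        printf("%s: remark, live=%lu, maybe select for rebuild\n", r->name, live_bytes);
        return live_bytes > 0;
      }
      // Cleanup safepoint: Updating -> Complete.
      void update_after_rebuild(HeapRegion* r) {
        printf("%s: cleanup, remset now complete\n", r->name);
      }
      void update_at_free(HeapRegion* r) {
        printf("%s: freed, nothing to do\n", r->name);
      }
    };

    int main() {
      HeapRegion r = {"old#17"};
      TrackingPolicyModel policy;
      policy.update_at_allocate(&r);
      if (policy.update_before_rebuild(&r, 4096)) {
        policy.update_after_rebuild(&r);
      }
      policy.update_at_free(&r);
      return 0;
    }
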
--- a/src/hotspot/share/gc/g1/g1RootClosures.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1RootClosures.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,8 +34,8 @@
 public:
   G1EvacuationClosures(G1CollectedHeap* g1h,
                        G1ParScanThreadState* pss,
-                       bool gcs_are_young) :
-      _closures(g1h, pss, gcs_are_young, /* must_claim_cld */ false) {}
+                       bool in_young_gc) :
+      _closures(g1h, pss, in_young_gc, /* must_claim_cld */ false) {}
 
   OopClosure* weak_oops()   { return &_closures._buffered_oops; }
   OopClosure* strong_oops() { return &_closures._buffered_oops; }
@@ -112,14 +112,14 @@
 
 G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) {
   G1EvacuationRootClosures* res = NULL;
-  if (g1h->collector_state()->during_initial_mark_pause()) {
+  if (g1h->collector_state()->in_initial_mark_gc()) {
     if (ClassUnloadingWithConcurrentMark) {
       res = new G1InitialMarkClosures<G1MarkPromotedFromRoot>(g1h, pss);
     } else {
       res = new G1InitialMarkClosures<G1MarkFromRoot>(g1h, pss);
     }
   } else {
-    res = new G1EvacuationClosures(g1h, pss, g1h->collector_state()->gcs_are_young());
+    res = new G1EvacuationClosures(g1h, pss, g1h->collector_state()->in_young_only_phase());
   }
   return res;
 }
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,7 +133,7 @@
   // as implicitly live).
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_in_progress()) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_or_rebuild_in_progress()) {
       JavaThread::satb_mark_queue_set().filter_thread_buffers();
     }
   }
--- a/src/hotspot/share/gc/g1/g1StringDedupQueue.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1StringDedupQueue.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,11 +27,11 @@
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupQueue.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/stack.inline.hpp"
 
 G1StringDedupQueue* G1StringDedupQueue::_queue = NULL;
--- a/src/hotspot/share/gc/g1/g1StringDedupTable.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1StringDedupTable.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,13 +29,13 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupTable.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "memory/padded.inline.hpp"
 #include "oops/arrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/typeArrayOop.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 
 //
 // List of deduplication table entries. Links table
--- a/src/hotspot/share/gc/g1/g1StringDedupThread.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1StringDedupThread.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -30,6 +30,7 @@
 #include "gc/g1/g1StringDedupThread.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 
@@ -66,7 +67,7 @@
 
   virtual void do_oop(oop* p) { ShouldNotReachHere(); }
   virtual void do_oop(narrowOop* p) {
-    oop java_string = oopDesc::load_decode_heap_oop(p);
+    oop java_string = RawAccess<>::oop_load(p);
     G1StringDedupTable::deduplicate(java_string, _stat);
   }
 };
--- a/src/hotspot/share/gc/g1/g1YCTypes.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1YCTypes.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 enum G1YCType {
   Normal,
   InitialMark,
-  DuringMark,
+  DuringMarkOrRebuild,
   Mixed,
   G1YCTypeEndSentinel
 };
@@ -41,7 +41,7 @@
     switch(type) {
       case Normal: return "Normal";
       case InitialMark: return "Initial Mark";
-      case DuringMark: return "During Mark";
+      case DuringMarkOrRebuild: return "During Mark";
       case Mixed: return "Mixed";
       default: ShouldNotReachHere(); return NULL;
     }
--- a/src/hotspot/share/gc/g1/g1_globals.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1_globals.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -184,9 +184,6 @@
           "-1 means print all.")                                            \
           range(-1, max_jint)                                               \
                                                                             \
-  develop(bool, G1ScrubRemSets, true,                                       \
-          "When true, do RS scrubbing after cleanup.")                      \
-                                                                            \
   product(uintx, G1ReservePercent, 10,                                      \
           "It determines the minimum reserve we should have in the heap "   \
           "to minimize the probability of promotion failure.")              \
@@ -213,16 +210,6 @@
           "during RSet scanning.")                                          \
           range(1, max_uintx)                                               \
                                                                             \
-  develop(uintx, G1SecondaryFreeListAppendLength, 5,                        \
-          "The number of regions we will add to the secondary free list "   \
-          "at every append operation")                                      \
-                                                                            \
-  develop(bool, G1StressConcRegionFreeing, false,                           \
-          "It stresses the concurrent region freeing operation")            \
-                                                                            \
-  develop(uintx, G1StressConcRegionFreeingDelayMillis, 0,                   \
-          "Artificial delay during concurrent region freeing")              \
-                                                                            \
   develop(uintx, G1DummyRegionsPerGC, 0,                                    \
           "The number of dummy regions G1 will allocate at the end of "     \
           "each evacuation pause in order to artificially fill up the "     \
@@ -269,6 +256,10 @@
           "Try to reclaim dead large objects that have a few stale "        \
           "references at every young GC.")                                  \
                                                                             \
+  experimental(size_t, G1RebuildRemSetChunkSize, 256 * K,                   \
+          "Chunk size used for rebuilding the remembered set.")             \
+          range(4 * K, 32 * M)                                              \
+                                                                            \
   experimental(uintx, G1OldCSetRegionThresholdPercent, 10,                  \
           "An upper bound for the number of old CSet regions expressed "    \
           "as a percentage of the heap size.")                              \
--- a/src/hotspot/share/gc/g1/g1_specialized_oop_closures.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/g1_specialized_oop_closures.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,9 @@
 class G1RootRegionScanClosure;
 
 class G1MarkAndPushClosure;
-class G1AdjustAndRebuildClosure;
+class G1AdjustClosure;
+
+class G1RebuildRemSetClosure;
 
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f) \
       f(G1ScanEvacuatedObjClosure,_nv)             \
@@ -50,10 +52,11 @@
       f(G1ScanObjsDuringScanRSClosure,_nv)         \
       f(G1ConcurrentRefineOopClosure,_nv)          \
       f(G1CMOopClosure,_nv)                        \
-      f(G1RootRegionScanClosure,_nv)
+      f(G1RootRegionScanClosure,_nv)               \
+      f(G1RebuildRemSetClosure,_nv)
 
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f) \
       f(G1MarkAndPushClosure,_nv)                      \
-      f(G1AdjustAndRebuildClosure,_nv)
+      f(G1AdjustClosure,_nv)
 
 #endif // SHARE_VM_GC_G1_G1_SPECIALIZED_OOP_CLOSURES_HPP
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -39,6 +39,8 @@
 #include "logging/logStream.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
@@ -129,15 +131,10 @@
   zero_marked_bytes();
 
   init_top_at_mark_start();
-  _gc_time_stamp = G1CollectedHeap::heap()->get_gc_time_stamp();
   if (clear_space) clear(SpaceDecorator::Mangle);
 }
 
-void HeapRegion::par_clear() {
-  assert(used() == 0, "the region should have been already cleared");
-  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
-  HeapRegionRemSet* hrrs = rem_set();
-  hrrs->clear();
+void HeapRegion::clear_cardtable() {
   G1CardTable* ct = G1CollectedHeap::heap()->card_table();
   ct->clear(MemRegion(bottom(), end()));
 }
@@ -256,7 +253,6 @@
 
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
-  record_timestamp();
 }
 
 void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
@@ -325,9 +321,9 @@
   bool _has_oops_in_region;
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
 
       // Note: not all the oops embedded in the nmethod are in the
       // current region. We only look at those which are.
@@ -450,12 +446,11 @@
   } else {
     st->print("|  ");
   }
-  st->print("|TS%3u", _gc_time_stamp);
-  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "|",
-               p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()));
+  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "| %s ",
+               p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()), rem_set()->get_state_str());
 }
 
-class G1VerificationClosure : public OopClosure {
+class G1VerificationClosure : public ExtendedOopClosure {
 protected:
   G1CollectedHeap* _g1h;
   G1CardTable *_ct;
@@ -488,6 +483,9 @@
     obj->print_on(out);
 #endif // PRODUCT
   }
+
+  // This closure provides its own oop verification code.
+  debug_only(virtual bool should_verify_oops() { return false; })
 };
 
 class VerifyLiveClosure : public G1VerificationClosure {
@@ -506,10 +504,10 @@
 
   template <class T>
   void verify_liveness(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
+    T heap_oop = RawAccess<>::oop_load(p);
     Log(gc, verify) log;
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       bool failed = false;
       if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
         MutexLockerEx x(ParGCRareEvent_lock,
@@ -525,7 +523,8 @@
             p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
           LogStream ls(log.error());
           print_object(&ls, _containing_obj);
-          log.error("points to obj " PTR_FORMAT " not in the heap", p2i(obj));
+          HeapRegion* const to = _g1h->heap_region_containing(obj);
+          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s", p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
         } else {
           HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
           HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
@@ -562,15 +561,16 @@
 
   template <class T>
   void verify_remembered_set(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
+    T heap_oop = RawAccess<>::oop_load(p);
     Log(gc, verify) log;
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
       HeapRegion* to = _g1h->heap_region_containing(obj);
       if (from != NULL && to != NULL &&
         from != to &&
-        !to->is_pinned()) {
+        !to->is_pinned() &&
+        to->rem_set()->is_complete()) {
         jbyte cv_obj = *_ct->byte_for_const(_containing_obj);
         jbyte cv_field = *_ct->byte_for_const(p);
         const jbyte dirty = G1CardTable::dirty_card_val();
@@ -593,7 +593,7 @@
           ResourceMark rm;
           LogStream ls(log.error());
           _containing_obj->print_on(&ls);
-          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT, p2i(obj), HR_FORMAT_PARAMS(to));
+          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s", p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
           if (oopDesc::is_oop(obj)) {
             obj->print_on(&ls);
           }
@@ -608,7 +608,7 @@
 };
 
 // Closure that applies the given two closures in sequence.
-class G1Mux2Closure : public OopClosure {
+class G1Mux2Closure : public ExtendedOopClosure {
   OopClosure* _c1;
   OopClosure* _c2;
 public:
@@ -620,6 +620,9 @@
   }
   virtual inline void do_oop(oop* p) { do_oop_work(p); }
   virtual inline void do_oop(narrowOop* p) { do_oop_work(p); }
+
+  // This closure provides its own oop verification code.
+  debug_only(virtual bool should_verify_oops() { return false; })
 };
 
 // This really ought to be commoned up into OffsetTableContigSpace somehow.
@@ -643,9 +646,7 @@
     if (!g1->is_obj_dead_cond(obj, this, vo)) {
       if (oopDesc::is_oop(obj)) {
         Klass* klass = obj->klass();
-        bool is_metaspace_object = Metaspace::contains(klass) ||
-                                   (vo == VerifyOption_G1UsePrevMarking &&
-                                   ClassLoaderDataGraph::unload_list_contains(klass));
+        bool is_metaspace_object = Metaspace::contains(klass);
         if (!is_metaspace_object) {
           log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                 "not metadata", p2i(klass), p2i(obj));
@@ -658,11 +659,11 @@
           return;
         } else {
           vl_cl.set_containing_obj(obj);
-          if (!g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC) {
+          if (!g1->collector_state()->in_full_gc() || G1VerifyRSetsDuringFullGC) {
             // verify liveness and rem_set
             vr_cl.set_containing_obj(obj);
             G1Mux2Closure mux(&vl_cl, &vr_cl);
-            obj->oop_iterate_no_header(&mux);
+            obj->oop_iterate(&mux);
 
             if (vr_cl.failures()) {
               *failures = true;
@@ -673,7 +674,7 @@
             }
           } else {
             // verify only liveness
-            obj->oop_iterate_no_header(&vl_cl);
+            obj->oop_iterate(&vl_cl);
           }
           if (vl_cl.failures()) {
             *failures = true;
@@ -789,7 +790,7 @@
     if (!g1->is_obj_dead_cond(obj, this, vo)) {
       if (oopDesc::is_oop(obj)) {
         vr_cl.set_containing_obj(obj);
-        obj->oop_iterate_no_header(&vr_cl);
+        obj->oop_iterate(&vr_cl);
 
         if (vr_cl.failures()) {
           *failures = true;
@@ -856,15 +857,6 @@
   return _bot_part.threshold();
 }
 
-void G1ContiguousSpace::record_timestamp() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  uint curr_gc_time_stamp = g1h->get_gc_time_stamp();
-
-  if (_gc_time_stamp < curr_gc_time_stamp) {
-    _gc_time_stamp = curr_gc_time_stamp;
-  }
-}
-
 void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
   object_iterate(blk);
 }
@@ -881,8 +873,7 @@
 
 G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) :
   _bot_part(bot, this),
-  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
-  _gc_time_stamp(0)
+  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
 {
 }
 
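The oop-access rewrite repeated across this changeset follows one pattern: raw load, explicit null check, explicit decode. A self-contained model of that pattern (toy oop/narrowOop stand-ins; in HotSpot the real calls are RawAccess<>::oop_load and CompressedOops::decode_not_null from oops/access.inline.hpp and oops/compressedOops.inline.hpp):

    #include <cstdint>
    #include <cstdio>

    typedef void*    oop;        // toy stand-in
    typedef uint32_t narrowOop;  // toy stand-in

    static char heap_base[1 << 20]; // assumed compressed-oops base

    struct CompressedOops {
      static bool is_null(narrowOop v) { return v == 0; }
      static bool is_null(oop v)       { return v == 0; }
      static oop  decode_not_null(narrowOop v) { return heap_base + v; }
      static oop  decode_not_null(oop v)       { return v; }
    };

    // The shape the diff converges on, replacing oopDesc::load_heap_oop,
    // oopDesc::is_null and oopDesc::decode_heap_oop_not_null.
    template <class T>
    void do_oop_work(T* p) {
      T heap_oop = *p; // RawAccess<>::oop_load(p) in HotSpot
      if (!CompressedOops::is_null(heap_oop)) {
        oop obj = CompressedOops::decode_not_null(heap_oop);
        printf("visiting %p\n", obj);
      }
    }

    int main() {
      narrowOop n = 64;
      oop o = heap_base;
      do_oop_work(&n); // compressed slot
      do_oop_work(&o); // uncompressed slot
      return 0;
    }
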
--- a/src/hotspot/share/gc/g1/heapRegion.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -100,7 +100,6 @@
  protected:
   G1BlockOffsetTablePart _bot_part;
   Mutex _par_alloc_lock;
-  volatile uint _gc_time_stamp;
   // When we need to retire an allocation region, while other threads
   // are also concurrently trying to allocate into it, we typically
   // allocate a dummy object at the end of the region to ensure that
@@ -147,10 +146,6 @@
   void mangle_unused_area() PRODUCT_RETURN;
   void mangle_unused_area_complete() PRODUCT_RETURN;
 
-  void record_timestamp();
-  void reset_gc_time_stamp() { _gc_time_stamp = 0; }
-  uint get_gc_time_stamp() { return _gc_time_stamp; }
-
   // See the comment above in the declaration of _pre_dummy_top for an
   // explanation of what it is.
   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
@@ -506,10 +501,11 @@
 
   // Reset the HeapRegion to default values.
   // If skip_remset is true, do not clear the remembered set.
+  // If clear_space is true, clear the HeapRegion's memory.
+  // If locked is true, assume we are the only thread doing this operation.
   void hr_clear(bool skip_remset, bool clear_space, bool locked = false);
-  // Clear the parts skipped by skip_remset in hr_clear() in the HeapRegion during
-  // a concurrent phase.
-  void par_clear();
+  // Clear the card table corresponding to this region.
+  void clear_cardtable();
 
   // Get the start of the unmarked area in this region.
   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
@@ -713,6 +709,7 @@
 class HeapRegionClosure : public StackObj {
   friend class HeapRegionManager;
   friend class G1CollectionSet;
+  friend class CollectionSetChooser;
 
   bool _is_complete;
   void set_incomplete() { _is_complete = false; }
--- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -247,6 +247,7 @@
 
 inline void HeapRegion::note_end_of_marking() {
   _prev_top_at_mark_start = _next_top_at_mark_start;
+  _next_top_at_mark_start = bottom();
   _prev_marked_bytes = _next_marked_bytes;
   _next_marked_bytes = 0;
 }
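The added line resets next TAMS once marking (and now remembered set rebuilding) is done with it. A small model of the swap (field names shortened, addresses made up):

    #include <cstdio>

    struct RegionModel {
      unsigned long bottom, prev_tams, next_tams;
      unsigned long prev_marked, next_marked;

      // Mirrors HeapRegion::note_end_of_marking above.
      void note_end_of_marking() {
        prev_tams   = next_tams; // finished marking's TAMS becomes "previous"
        next_tams   = bottom;    // the line added by this change
        prev_marked = next_marked;
        next_marked = 0;
      }
    };

    int main() {
      RegionModel r = {0x1000, 0x1000, 0x4000, 0, 512};
      r.note_end_of_marking();
      printf("prev_tams=%#lx next_tams=%#lx prev_marked=%lu\n",
             r.prev_tams, r.next_tams, r.prev_marked);
      return 0;
    }
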
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,7 +26,6 @@
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
-#include "gc/g1/g1CardLiveData.inline.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/space.inline.hpp"
@@ -40,6 +39,9 @@
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/growableArray.hpp"
 
+const char* HeapRegionRemSet::_state_strings[] =  {"Untracked", "Updating", "Complete"};
+const char* HeapRegionRemSet::_short_state_strings[] =  {"UNTRA", "UPDAT", "CMPLT"};
+
 class PerRegionTable: public CHeapObj<mtGC> {
   friend class OtherRegionsTable;
   friend class HeapRegionRemSetIterator;
@@ -64,10 +66,6 @@
   // We need access in order to union things into the base table.
   BitMap* bm() { return &_bm; }
 
-  void recount_occupied() {
-    _occupied = (jint) bm()->count_one_bits();
-  }
-
   PerRegionTable(HeapRegion* hr) :
     _hr(hr),
     _occupied(0),
@@ -96,17 +94,8 @@
     // If the test below fails, then this table was reused concurrently
     // with this operation.  This is OK, since the old table was coarsened,
     // and adding a bit to the new table is never incorrect.
-    // If the table used to belong to a continues humongous region and is
-    // now reused for the corresponding start humongous region, we need to
-    // make sure that we detect this. Thus, we call is_in_reserved_raw()
-    // instead of just is_in_reserved() here.
     if (loc_hr->is_in_reserved(from)) {
-      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
-      CardIdx_t from_card = (CardIdx_t)
-          hw_offset >> (G1CardTable::card_shift - LogHeapWordSize);
-
-      assert((size_t)from_card < HeapRegion::CardsPerRegion,
-             "Must be in range.");
+      CardIdx_t from_card = OtherRegionsTable::card_within_region(from, loc_hr);
       add_card_work(from_card, par);
     }
   }
@@ -142,11 +131,6 @@
     add_reference_work(from, /*parallel*/ false);
   }
 
-  void scrub(G1CardLiveData* live_data) {
-    live_data->remove_nonlive_cards(hr()->hrm_index(), &_bm);
-    recount_occupied();
-  }
-
   void add_card(CardIdx_t from_card_index) {
     add_card_work(from_card_index, /*parallel*/ true);
   }
@@ -351,10 +335,18 @@
          "just checking");
 }
 
+CardIdx_t OtherRegionsTable::card_within_region(OopOrNarrowOopStar within_region, HeapRegion* hr) {
+  assert(hr->is_in_reserved(within_region),
+         "HeapWord " PTR_FORMAT " is outside of region %u [" PTR_FORMAT ", " PTR_FORMAT ")",
+         p2i(within_region), hr->hrm_index(), p2i(hr->bottom()), p2i(hr->end()));
+  CardIdx_t result = (CardIdx_t)(pointer_delta((HeapWord*)within_region, hr->bottom()) >> (CardTable::card_shift - LogHeapWordSize));
+  return result;
+}
+
 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
   uint cur_hrm_ind = _hr->hrm_index();
 
-  int from_card = (int)(uintptr_t(from) >> G1CardTable::card_shift);
+  uintptr_t from_card = uintptr_t(from) >> CardTable::card_shift;
 
   if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
     assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
@@ -380,12 +372,8 @@
     prt = find_region_table(ind, from_hr);
     if (prt == NULL) {
 
-      uintptr_t from_hr_bot_card_index =
-        uintptr_t(from_hr->bottom())
-          >> G1CardTable::card_shift;
-      CardIdx_t card_index = from_card - from_hr_bot_card_index;
-      assert((size_t)card_index < HeapRegion::CardsPerRegion,
-             "Must be in range.");
+      CardIdx_t card_index = card_within_region(from, from_hr);
+
       if (G1HRRSUseSparseTable &&
           _sparse_table.add_card(from_hrm_ind, card_index)) {
         assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from));
@@ -436,7 +424,7 @@
   assert(prt != NULL, "Inv");
 
   prt->add_reference(from);
-  assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT", p2i(from));
+  assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT (%d)", p2i(from), prt->contains_reference(from));
 }
 
 PerRegionTable*
@@ -509,56 +497,6 @@
   return max;
 }
 
-void OtherRegionsTable::scrub(G1CardLiveData* live_data) {
-  // First eliminated garbage regions from the coarse map.
-  log_develop_trace(gc, remset, scrub)("Scrubbing region %u:", _hr->hrm_index());
-
-  log_develop_trace(gc, remset, scrub)("   Coarse map: before = " SIZE_FORMAT "...", _n_coarse_entries);
-  if (_n_coarse_entries > 0) {
-    live_data->remove_nonlive_regions(&_coarse_map);
-    _n_coarse_entries = _coarse_map.count_one_bits();
-  }
-  log_develop_trace(gc, remset, scrub)("   after = " SIZE_FORMAT ".", _n_coarse_entries);
-
-  // Now do the fine-grained maps.
-  for (size_t i = 0; i < _max_fine_entries; i++) {
-    PerRegionTable* cur = _fine_grain_regions[i];
-    PerRegionTable** prev = &_fine_grain_regions[i];
-    while (cur != NULL) {
-      PerRegionTable* nxt = cur->collision_list_next();
-      // If the entire region is dead, eliminate.
-      log_develop_trace(gc, remset, scrub)("     For other region %u:", cur->hr()->hrm_index());
-      if (!live_data->is_region_live(cur->hr()->hrm_index())) {
-        *prev = nxt;
-        cur->set_collision_list_next(NULL);
-        _n_fine_entries--;
-        log_develop_trace(gc, remset, scrub)("          deleted via region map.");
-        unlink_from_all(cur);
-        PerRegionTable::free(cur);
-      } else {
-        // Do fine-grain elimination.
-        log_develop_trace(gc, remset, scrub)("          occ: before = %4d.", cur->occupied());
-        cur->scrub(live_data);
-        log_develop_trace(gc, remset, scrub)("          after = %4d.", cur->occupied());
-        // Did that empty the table completely?
-        if (cur->occupied() == 0) {
-          *prev = nxt;
-          cur->set_collision_list_next(NULL);
-          _n_fine_entries--;
-          unlink_from_all(cur);
-          PerRegionTable::free(cur);
-        } else {
-          prev = cur->collision_list_next_addr();
-        }
-      }
-      cur = nxt;
-    }
-  }
-  // Since we may have deleted a from_card_cache entry from the RS, clear
-  // the FCC.
-  clear_fcc();
-}
-
 bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
   if (limit <= (size_t)G1RSetSparseRegionEntries) {
     return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
@@ -665,19 +603,12 @@
   if (_coarse_map.at(hr_ind)) return true;
 
   PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
-                                     hr);
+                                          hr);
   if (prt != NULL) {
     return prt->contains_reference(from);
 
   } else {
-    uintptr_t from_card =
-      (uintptr_t(from) >> G1CardTable::card_shift);
-    uintptr_t hr_bot_card_index =
-      uintptr_t(hr->bottom()) >> G1CardTable::card_shift;
-    assert(from_card >= hr_bot_card_index, "Inv");
-    CardIdx_t card_index = from_card - hr_bot_card_index;
-    assert((size_t)card_index < HeapRegion::CardsPerRegion,
-           "Must be in range.");
+    CardIdx_t card_index = card_within_region(from, hr);
     return _sparse_table.contains_card(hr_ind, card_index);
   }
 }
@@ -692,6 +623,7 @@
   : _bot(bot),
     _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
     _code_roots(),
+    _state(Untracked),
     _other_regions(hr, &_m) {
 }
 
@@ -713,21 +645,20 @@
   SparsePRT::cleanup_all();
 }
 
-void HeapRegionRemSet::clear() {
+void HeapRegionRemSet::clear(bool only_cardset) {
   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
-  clear_locked();
+  clear_locked(only_cardset);
 }
 
-void HeapRegionRemSet::clear_locked() {
-  _code_roots.clear();
+void HeapRegionRemSet::clear_locked(bool only_cardset) {
+  if (!only_cardset) {
+    _code_roots.clear();
+  }
   _other_regions.clear();
+  set_state_empty();
   assert(occupied_locked() == 0, "Should be clear.");
 }
 
-void HeapRegionRemSet::scrub(G1CardLiveData* live_data) {
-  _other_regions.scrub(live_data);
-}
-
 // Code roots support
 //
 // The code root set is protected by two separate locking schemes
@@ -903,8 +834,7 @@
   _other_regions.do_cleanup_work(hrrs_cleanup_task);
 }
 
-void
-HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
+void HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
   SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
 }
 
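card_within_region above centralizes arithmetic that was previously duplicated in add_reference and contains_reference_locked. The same computation outside HotSpot (512-byte cards assumed, matching G1's default; the region bounds are fabricated):

    #include <cstdint>
    #include <cstdio>

    const unsigned kCardShift = 9; // log2(512-byte cards)

    // Card index of addr relative to the region bottom: byte offset shifted
    // down by the card size.
    intptr_t card_within_region(const void* addr, const void* bottom) {
      return (reinterpret_cast<uintptr_t>(addr) -
              reinterpret_cast<uintptr_t>(bottom)) >> kCardShift;
    }

    int main() {
      char region[8192];
      printf("card index: %ld\n", (long)card_within_region(region + 1500, region));
      return 0;
    }
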
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -123,15 +123,17 @@
 
   bool contains_reference_locked(OopOrNarrowOopStar from) const;
 
+public:
   // Clear the from_card_cache entries for this region.
   void clear_fcc();
-public:
   // Create a new remembered set for the given heap region. The given mutex should
   // be used to ensure consistency.
   OtherRegionsTable(HeapRegion* hr, Mutex* m);
 
-  // For now.  Could "expand" some tables in the future, so that this made
-  // sense.
+  // Returns the card index of the given within_region pointer relative to the bottom
+  // of the given heap region.
+  static CardIdx_t card_within_region(OopOrNarrowOopStar within_region, HeapRegion* hr);
+  // Adds the reference "from" to this remembered set.
   void add_reference(OopOrNarrowOopStar from, uint tid);
 
   // Returns whether the remembered set contains the given reference.
@@ -141,11 +143,6 @@
   // that is less or equal than the given occupancy.
   bool occupancy_less_or_equal_than(size_t limit) const;
 
-  // Removes any entries shown by the given bitmaps to contain only dead
-  // objects. Not thread safe.
-  // Set bits in the bitmaps indicate that the given region or card is live.
-  void scrub(G1CardLiveData* live_data);
-
   // Returns whether this remembered set (and all sub-sets) does not contain any entry.
   bool is_empty() const;
 
@@ -217,24 +214,64 @@
 
   static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); }
 
+private:
+  enum RemSetState {
+    Untracked,
+    Updating,
+    Complete
+  };
+
+  RemSetState _state;
+
+  static const char* _state_strings[];
+  static const char* _short_state_strings[];
+public:
+
+  const char* get_state_str() const { return _state_strings[_state]; }
+  const char* get_short_state_str() const { return _short_state_strings[_state]; }
+
+  bool is_tracked() { return _state != Untracked; }
+  bool is_updating() { return _state == Updating; }
+  bool is_complete() { return _state == Complete; }
+
+  void set_state_empty() {
+    guarantee(SafepointSynchronize::is_at_safepoint() || !is_tracked(), "Should only set to Untracked during safepoint but is %s.", get_state_str());
+    if (_state == Untracked) {
+      return;
+    }
+    _other_regions.clear_fcc();
+    _state = Untracked;
+  }
+
+  void set_state_updating() {
+    guarantee(SafepointSynchronize::is_at_safepoint() && !is_tracked(), "Should only set to Updating from Untracked during safepoint but is %s", get_state_str());
+    _other_regions.clear_fcc();
+    _state = Updating;
+  }
+
+  void set_state_complete() {
+    _other_regions.clear_fcc();
+    _state = Complete;
+  }
+
   // Used in the sequential case.
   void add_reference(OopOrNarrowOopStar from) {
-    _other_regions.add_reference(from, 0);
+    add_reference(from, 0);
   }
 
   // Used in the parallel case.
   void add_reference(OopOrNarrowOopStar from, uint tid) {
+    RemSetState state = _state;
+    if (state == Untracked) {
+      return;
+    }
     _other_regions.add_reference(from, tid);
   }
 
-  // Removes any entries in the remembered set shown by the given card live data to
-  // contain only dead objects. Not thread safe.
-  void scrub(G1CardLiveData* live_data);
-
   // The region is being reclaimed; clear its remset, and any mention of
   // entries for this region in other remsets.
-  void clear();
-  void clear_locked();
+  void clear(bool only_cardset = false);
+  void clear_locked(bool only_cardset = false);
 
   // The actual # of bytes this hr_remset takes up.
   // Note also includes the strong code root set.
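The new Untracked/Updating/Complete machinery condenses into a few lines. A toy model (not the HotSpot class; locking, the from-card cache, and safepoint checks are omitted) showing the add_reference early-out and the transitions driven by the tracking policy:

    #include <cassert>
    #include <cstdio>

    enum RemSetState { Untracked, Updating, Complete };

    struct RemSetModel {
      RemSetState state;
      unsigned    cards;

      RemSetModel() : state(Untracked), cards(0) {}

      void add_reference() {
        if (state == Untracked) return; // cheap early-out on the hot path
        cards++;
      }
      void set_state_updating() { assert(state == Untracked); state = Updating; }
      void set_state_complete() { state = Complete; }
      void set_state_empty()    { state = Untracked; cards = 0; }
    };

    int main() {
      RemSetModel rs;
      rs.add_reference();      // dropped: still Untracked
      rs.set_state_updating(); // remark pause: selected for rebuild
      rs.add_reference();      // recorded during rebuild
      rs.set_state_complete(); // cleanup pause
      printf("cards tracked: %u\n", rs.cards);
      return 0;
    }

Untracked remembered sets ignoring add_reference entirely is what lets G1 skip remembered set maintenance for regions it does not intend to collect.
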
--- a/src/hotspot/share/gc/g1/heapRegionSet.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/heapRegionSet.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -321,14 +321,6 @@
   }
 }
 
-void SecondaryFreeRegionListMtSafeChecker::check() {
-  // Secondary Free List MT safety protocol:
-  // Operations on the secondary free list should always be invoked
-  // while holding the SecondaryFreeList_lock.
-
-  guarantee(SecondaryFreeList_lock->owned_by_self(), "secondary free list MT safety protocol");
-}
-
 void OldRegionSetMtSafeChecker::check() {
   // Master Old Set MT safety protocol:
   // (a) If we're at a safepoint, operations on the master old set
--- a/src/hotspot/share/gc/g1/heapRegionSet.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -59,7 +59,6 @@
 };
 
 class MasterFreeRegionListMtSafeChecker    : public HRSMtSafeChecker { public: void check(); };
-class SecondaryFreeRegionListMtSafeChecker : public HRSMtSafeChecker { public: void check(); };
 class HumongousRegionSetMtSafeChecker      : public HRSMtSafeChecker { public: void check(); };
 class OldRegionSetMtSafeChecker            : public HRSMtSafeChecker { public: void check(); };
 
--- a/src/hotspot/share/gc/g1/vm_operations_g1.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/g1/vm_operations_g1.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -23,8 +23,8 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
--- a/src/hotspot/share/gc/parallel/parallelArguments.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -38,8 +38,8 @@
   return CollectorPolicy::compute_heap_alignment();
 }
 
-void ParallelArguments::initialize_flags() {
-  GCArguments::initialize_flags();
+void ParallelArguments::initialize() {
+  GCArguments::initialize();
   assert(UseParallelGC || UseParallelOldGC, "Error");
   // Enable ParallelOld unless it was explicitly disabled (cmd line or rc file).
   if (FLAG_IS_DEFAULT(UseParallelOldGC)) {
--- a/src/hotspot/share/gc/parallel/parallelArguments.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/parallelArguments.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -31,7 +31,7 @@
 
 class ParallelArguments : public GCArguments {
 public:
-  virtual void initialize_flags();
+  virtual void initialize();
   virtual size_t conservative_max_heap_alignment();
   virtual CollectedHeap* create_heap();
 };
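The rename is mechanical, but the shape matters: subclasses override initialize() and chain to the base class first. A minimal mock of that hierarchy (placeholder bodies; the real shared setup lives elsewhere in gc/shared):

    #include <cstdio>

    class GCArguments {
    public:
      virtual ~GCArguments() {}
      virtual void initialize() { printf("GCArguments::initialize\n"); }
    };

    class ParallelArguments : public GCArguments {
    public:
      virtual void initialize() {
        GCArguments::initialize();                  // chain to shared flag setup
        printf("ParallelArguments::initialize\n");  // collector-specific flags
      }
    };

    int main() {
      ParallelArguments args;
      GCArguments* gc = &args;
      gc->initialize(); // dispatches to the ParallelArguments override
      return 0;
    }
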
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -38,7 +38,7 @@
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/vmPSOperations.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcWhen.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
@@ -622,7 +622,7 @@
 ParallelScavengeHeap* ParallelScavengeHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
+  assert(heap->kind() == CollectedHeap::Parallel, "Invalid name");
   return (ParallelScavengeHeap*)heap;
 }
 
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -102,7 +102,7 @@
   };
 
   virtual Name kind() const {
-    return CollectedHeap::ParallelScavengeHeap;
+    return CollectedHeap::Parallel;
   }
 
   virtual const char* name() const {
--- a/src/hotspot/share/gc/parallel/psCardTable.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psCardTable.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -31,6 +31,7 @@
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/psTasks.hpp"
 #include "gc/parallel/psYoungGen.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "utilities/align.hpp"
@@ -45,7 +46,7 @@
 
  protected:
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     if (_young_gen->is_in_reserved(obj) &&
         !_card_table->addr_is_marked_imprecise(p)) {
       // Don't overwrite the first missing card mark
@@ -102,7 +103,7 @@
 
  protected:
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     if (_young_gen->is_in_reserved(obj)) {
       assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
       _card_table->set_card_newgen(p);
--- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -34,6 +34,8 @@
 #include "gc/shared/taskqueue.inline.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/objArrayKlass.inline.hpp"
@@ -182,10 +184,10 @@
 template <class T>
 static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);
-  T heap_oop = oopDesc::load_heap_oop(referent_addr);
+  T heap_oop = RawAccess<>::oop_load(referent_addr);
   log_develop_trace(gc, ref)("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
-  if (!oopDesc::is_null(heap_oop)) {
-    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop referent = CompressedOops::decode_not_null(heap_oop);
     if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
         PSParallelCompact::ref_processor()->discover_reference(obj, klass->reference_type())) {
       // reference already enqueued, referent will be traversed later
@@ -201,8 +203,8 @@
   T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
   // Treat discovered as normal oop, if ref is not "active",
   // i.e. if next is non-NULL.
-  T  next_oop = oopDesc::load_heap_oop(next_addr);
-  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+  T  next_oop = RawAccess<>::oop_load(next_addr);
+  if (!CompressedOops::is_null(next_oop)) { // i.e. ref is not "active"
     T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
     log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
     cm->mark_and_push(discovered_addr);
--- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,7 +29,9 @@
 #include "gc/parallel/psCompactionManager.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/arrayOop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/debug.hpp"
@@ -71,9 +73,9 @@
 
 template <typename T>
 inline void ParCompactionManager::mark_and_push(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
 
     if (mark_bitmap()->is_unmarked(obj) && PSParallelCompact::mark_obj(obj)) {
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -39,7 +39,7 @@
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -30,7 +30,7 @@
 #include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -44,7 +44,7 @@
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
@@ -55,6 +55,7 @@
 #include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/methodData.hpp"
@@ -3078,11 +3079,11 @@
                                                   T* discovered_addr) {
   log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
   log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-                             p2i(referent_addr), referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
+                             p2i(referent_addr), referent_addr ? p2i((oop)RawAccess<>::oop_load(referent_addr)) : NULL);
   log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-                             p2i(next_addr), next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
+                             p2i(next_addr), next_addr ? p2i((oop)RawAccess<>::oop_load(next_addr)) : NULL);
   log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-                             p2i(discovered_addr), discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
+                             p2i(discovered_addr), discovered_addr ? p2i((oop)RawAccess<>::oop_load(discovered_addr)) : NULL);
 }
 #endif
 
--- a/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,6 +29,8 @@
 #include "gc/parallel/parMarkBitMap.inline.hpp"
 #include "gc/parallel/psParallelCompact.hpp"
 #include "gc/shared/collectedHeap.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.hpp"
 #include "oops/oop.inline.hpp"
 
@@ -105,9 +107,9 @@
 
 template <class T>
 inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
 
     oop new_obj = (oop)summary_data().calc_new_pointer(obj, cm);
@@ -117,7 +119,7 @@
     if (new_obj != NULL) {
       assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
              "should be in object space");
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
   }
 }
--- a/src/hotspot/share/gc/parallel/psPromotionManager.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -38,7 +38,9 @@
 #include "memory/memRegion.hpp"
 #include "memory/padded.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/arrayOop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/objArrayKlass.inline.hpp"
@@ -451,8 +453,8 @@
   // Treat discovered as normal oop, if ref is not "active",
   // i.e. if next is non-NULL.
   T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
-  T  next_oop = oopDesc::load_heap_oop(next_addr);
-  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+  T  next_oop = RawAccess<>::oop_load(next_addr);
+  if (!CompressedOops::is_null(next_oop)) { // i.e. ref is not "active"
     T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
     log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
     if (PSScavenge::should_scavenge(discovered_addr)) {
--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -33,6 +33,7 @@
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
@@ -49,14 +50,14 @@
 template <class T>
 inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
   if (p != NULL) { // XXX: error if p != NULL here
-    oop o = oopDesc::load_decode_heap_oop_not_null(p);
+    oop o = RawAccess<OOP_NOT_NULL>::oop_load(p);
     if (o->is_forwarded()) {
       o = o->forwardee();
       // Card mark
       if (PSScavenge::is_obj_in_young(o)) {
         PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
       }
-      oopDesc::encode_store_heap_oop_not_null(p, o);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, o);
     } else {
       push_depth(p);
     }
@@ -278,7 +279,7 @@
 inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
   assert(should_scavenge(p, true), "revisiting object?");
 
-  oop o = oopDesc::load_decode_heap_oop_not_null(p);
+  oop o = RawAccess<OOP_NOT_NULL>::oop_load(p);
   oop new_obj = o->is_forwarded()
         ? o->forwardee()
         : copy_to_survivor_space<promote_immediately>(o);
@@ -291,7 +292,7 @@
                       new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
   }
 
-  oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+  RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
 
   // We cannot mark without test, as some code passes us pointers
   // that are outside the heap. These pointers are either from roots
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -36,7 +36,7 @@
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
@@ -47,6 +47,8 @@
 #include "gc/shared/weakProcessor.hpp"
 #include "memory/resourceArea.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/handles.inline.hpp"
@@ -93,8 +95,7 @@
   }
 
   template <class T> void do_oop_work(T* p) {
-    assert (!oopDesc::is_null(*p), "expected non-null ref");
-    assert (oopDesc::is_oop(oopDesc::load_decode_heap_oop_not_null(p)),
+    assert (oopDesc::is_oop(RawAccess<OOP_NOT_NULL>::oop_load(p)),
             "expected an oop while scanning weak refs");
 
     // Weak refs may be visited more than once.
@@ -738,7 +739,7 @@
 void PSScavenge::set_young_generation_boundary(HeapWord* v) {
   _young_generation_boundary = v;
   if (UseCompressedOops) {
-    _young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
+    _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode((oop)v);
   }
 }
 
--- a/src/hotspot/share/gc/parallel/psScavenge.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psScavenge.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -31,6 +31,7 @@
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 inline void PSScavenge::save_to_space_top_before_gc() {
@@ -39,14 +40,14 @@
 }
 
 template <class T> inline bool PSScavenge::should_scavenge(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
   return PSScavenge::is_obj_in_young(heap_oop);
 }
 
 template <class T>
 inline bool PSScavenge::should_scavenge(T* p, MutableSpace* to_space) {
   if (should_scavenge(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     // Skip objects copied to to_space since the scavenge started.
     HeapWord* const addr = (HeapWord*)obj;
     return addr < to_space_top_before_gc() || addr >= to_space->end();
@@ -107,7 +108,7 @@
       } else {
         new_obj = _pm->copy_to_survivor_space</*promote_immediately=*/false>(o);
       }
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
 
       if (PSScavenge::is_obj_in_young(new_obj)) {
         do_cld_barrier();
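should_scavenge above reduces to "does this slot point into the young generation". A toy version with a fabricated boundary (plain C++, not the PSScavenge code; the real test also handles the compressed-oops boundary):

    #include <cstdio>

    static char heap[4096];
    static char* young_boundary = heap + 2048; // assumption: old gen below, young above

    bool is_obj_in_young(const void* obj) {
      return obj >= (const void*)young_boundary;
    }

    // A slot is worth scavenging only if the object it refers to is young.
    bool should_scavenge(void** p) {
      void* obj = *p; // RawAccess<>::oop_load(p) in HotSpot
      return is_obj_in_young(obj);
    }

    int main() {
      void* old_ref   = heap + 100;
      void* young_ref = heap + 3000;
      printf("old: %d young: %d\n", should_scavenge(&old_ref), should_scavenge(&young_ref));
      return 0;
    }
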
--- a/src/hotspot/share/gc/parallel/psTasks.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psTasks.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_GC_PARALLEL_PSTASKS_HPP
 #define SHARE_VM_GC_PARALLEL_PSTASKS_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/growableArray.hpp"
 
 //
--- a/src/hotspot/share/gc/parallel/psVirtualspace.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/psVirtualspace.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_PARALLEL_PSVIRTUALSPACE_HPP
 #define SHARE_VM_GC_PARALLEL_PSVIRTUALSPACE_HPP
 
+#include "memory/allocation.hpp"
 #include "memory/virtualspace.hpp"
 
 // VirtualSpace for the parallel scavenge collector.
--- a/src/hotspot/share/gc/parallel/vmPSOperations.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/parallel/vmPSOperations.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,7 @@
 #include "gc/parallel/psMarkSweep.hpp"
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/vmPSOperations.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "utilities/dtrace.hpp"
 
 // The following methods are used by the parallel scavenge collector
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,7 +29,7 @@
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
--- a/src/hotspot/share/gc/serial/defNewGeneration.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/serial/defNewGeneration.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -30,6 +30,7 @@
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/space.hpp"
+#include "oops/access.inline.hpp"
 
 // Methods of protected closure types
 
@@ -39,8 +40,7 @@
   {
     // We never expect to see a null reference being processed
     // as a weak reference.
-    assert (!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     assert (oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
   }
 #endif // ASSERT
@@ -61,7 +61,7 @@
   // dirty cards in the young gen are never scanned, so the
   // extra check probably isn't worthwhile.
   if (GenCollectedHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->inline_write_ref_field_gc(p, obj);
   }
 }
@@ -72,8 +72,7 @@
   {
     // We never expect to see a null reference being processed
     // as a weak reference.
-    assert (!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     assert (oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
   }
 #endif // ASSERT
@@ -83,7 +82,7 @@
   // Optimized for Defnew generation if it's the youngest generation:
   // we set a younger_gen card if we have an older->youngest
   // generation pointer.
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   if (((HeapWord*)obj < _boundary) && GenCollectedHeap::heap()->is_in_reserved(p)) {
     _rs->inline_write_ref_field_gc(p, obj);
   }
--- a/src/hotspot/share/gc/serial/markSweep.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/serial/markSweep.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -30,6 +30,8 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/specialized_oop_closures.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceClassLoaderKlass.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
@@ -73,9 +75,9 @@
 }
 
 template <class T> inline void MarkSweep::mark_and_push(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if (!obj->mark()->is_marked()) {
       mark_object(obj);
       _marking_stack.push(obj);
@@ -169,9 +171,9 @@
 template <class T> inline void MarkSweep::follow_root(T* p) {
   assert(!Universe::heap()->is_in_reserved(p),
          "roots shouldn't be things within the heap");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if (!obj->mark()->is_marked()) {
       mark_object(obj);
       follow_object(obj);
--- a/src/hotspot/share/gc/serial/markSweep.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/serial/markSweep.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,6 +29,8 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/universe.hpp"
 #include "oops/markOop.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 inline int MarkSweep::adjust_pointers(oop obj) {
@@ -36,9 +38,9 @@
 }
 
 template <class T> inline void MarkSweep::adjust_pointer(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(Universe::heap()->is_in(obj), "should be in heap");
 
     oop new_obj = oop(obj->mark()->decode_pointer());
@@ -52,7 +54,7 @@
     if (new_obj != NULL) {
       assert(Universe::heap()->is_in_reserved(new_obj),
              "should be in object space");
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
   }
 }
--- a/src/hotspot/share/gc/serial/serialHeap.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/serial/serialHeap.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -47,7 +47,7 @@
   SerialHeap(GenCollectorPolicy* policy);
 
   virtual Name kind() const {
-    return CollectedHeap::SerialHeap;
+    return CollectedHeap::Serial;
   }
 
   virtual const char* name() const {
--- a/src/hotspot/share/gc/shared/barrierSet.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -262,6 +262,10 @@
     static oop resolve(oop obj) {
       return Raw::resolve(obj);
     }
+
+    static bool equals(oop o1, oop o2) {
+      return Raw::equals(o1, o2);
+    }
   };
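The new equals() forwarder lets oop comparisons be routed through the barrier
set; the interpreter hunks later in this changeset switch raw == comparisons to
oopDesc::equals accordingly. A hedged sketch of a call site, using names that
appear in the bytecodeInterpreter.cpp hunk below:

    // Compare two oops through the configured barrier rather than with
    // raw ==, so a collector that forwards objects can intercept it.
    if (oopDesc::equals(result, Universe::the_null_sentinel())) {
      result = NULL;
    }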
 };
 
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,6 +28,7 @@
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/allocation.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/java.hpp"
@@ -351,7 +352,7 @@
            "Error: jp " PTR_FORMAT " should be within "
            "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")",
            p2i(jp), p2i(_begin), p2i(_end));
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     guarantee(obj == NULL || (HeapWord*)obj >= _boundary,
               "pointer " PTR_FORMAT " at " PTR_FORMAT " on "
               "clean card crosses boundary" PTR_FORMAT,
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -586,12 +586,50 @@
   initialize_serviceability();
 }
 
-oop CollectedHeap::pin_object(JavaThread* thread, oop o) {
-  Handle handle(thread, o);
-  GCLocker::lock_critical(thread);
-  return handle();
+#ifndef PRODUCT
+
+bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
+  // Access to count is not atomic; the value does not have to be exact.
+  if (PromotionFailureALot) {
+    const size_t gc_num = total_collections();
+    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
+    if (elapsed_gcs >= PromotionFailureALotInterval) {
+      // Test for unsigned arithmetic wrap-around.
+      if (++*count >= PromotionFailureALotCount) {
+        *count = 0;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+bool CollectedHeap::promotion_should_fail() {
+  return promotion_should_fail(&_promotion_failure_alot_count);
 }
 
-void CollectedHeap::unpin_object(JavaThread* thread, oop o) {
-  GCLocker::unlock_critical(thread);
+void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
+  if (PromotionFailureALot) {
+    _promotion_failure_alot_gc_number = total_collections();
+    *count = 0;
+  }
+}
+
+void CollectedHeap::reset_promotion_should_fail() {
+  reset_promotion_should_fail(&_promotion_failure_alot_count);
 }
+
+#endif  // #ifndef PRODUCT
+
+bool CollectedHeap::supports_object_pinning() const {
+  return false;
+}
+
+oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
+  ShouldNotReachHere();
+}
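pin_object()/unpin_object() now trap by default, and supports_object_pinning()
reports false; a collector that can pin overrides all three. A hedged sketch of
how a JNI critical entry point might branch (the control flow is illustrative,
not the actual JNI implementation):

    CollectedHeap* heap = Universe::heap();
    if (heap->supports_object_pinning()) {
      // The GC guarantees a pinned object never moves.
      obj = heap->pin_object(thread, obj);
    } else {
      // Fall back to the GCLocker critical-section protocol.
      GCLocker::lock_critical(thread);
    }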
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -187,10 +187,11 @@
 
  public:
   enum Name {
-    SerialHeap,
-    ParallelScavengeHeap,
-    G1CollectedHeap,
-    CMSHeap
+    None,
+    Serial,
+    Parallel,
+    CMS,
+    G1
   };
 
   static inline size_t filler_array_max_size() {
@@ -588,27 +589,25 @@
   // perform cleanup tasks serially in the VMThread.
   virtual WorkGang* get_safepoint_workers() { return NULL; }
 
-  // Support for object pinning. This is used by JNI's Get*Critical() and
-  // Release*Critical() family of functions. A GC may either use the GCLocker
-  // protocol to ensure no critical arrays are in-use when entering
-  // a GC pause, or it can implement pinning, which must guarantee that
-  // the object does not move while pinned.
-  virtual oop pin_object(JavaThread* thread, oop o);
-
-  virtual void unpin_object(JavaThread* thread, oop o);
+  // Support for object pinning. This is used by the JNI Get*Critical()
+  // and Release*Critical() families of functions. If supported, the GC
+  // must guarantee that pinned objects never move.
+  virtual bool supports_object_pinning() const;
+  virtual oop pin_object(JavaThread* thread, oop obj);
+  virtual void unpin_object(JavaThread* thread, oop obj);
 
   // Non product verification and debugging.
 #ifndef PRODUCT
   // Support for PromotionFailureALot.  Return true if it's time to cause a
   // promotion failure.  The no-argument version uses
   // this->_promotion_failure_alot_count as the counter.
-  inline bool promotion_should_fail(volatile size_t* count);
-  inline bool promotion_should_fail();
+  bool promotion_should_fail(volatile size_t* count);
+  bool promotion_should_fail();
 
   // Reset the PromotionFailureALot counters.  Should be called at the end of a
   // GC in which promotion failure occurred.
-  inline void reset_promotion_should_fail(volatile size_t* count);
-  inline void reset_promotion_should_fail();
+  void reset_promotion_should_fail(volatile size_t* count);
+  void reset_promotion_should_fail();
 #endif  // #ifndef PRODUCT
 
 #ifdef ASSERT
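Since the bodies moved out of line (see the collectedHeap.cpp hunk above), call
sites are unchanged. A hedged sketch of the PromotionFailureALot protocol in a
copying collector, valid only in non-product builds:

    // Non-product testing hook: periodically simulate a failed promotion.
    if (heap->promotion_should_fail()) {
      return NULL;  // caller treats this as a promotion failure
    }
    // After a GC in which promotion failure actually occurred:
    heap->reset_promotion_should_fail();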
--- a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -299,39 +299,4 @@
   }
 }
 
-#ifndef PRODUCT
-
-inline bool
-CollectedHeap::promotion_should_fail(volatile size_t* count) {
-  // Access to count is not atomic; the value does not have to be exact.
-  if (PromotionFailureALot) {
-    const size_t gc_num = total_collections();
-    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
-    if (elapsed_gcs >= PromotionFailureALotInterval) {
-      // Test for unsigned arithmetic wrap-around.
-      if (++*count >= PromotionFailureALotCount) {
-        *count = 0;
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-inline bool CollectedHeap::promotion_should_fail() {
-  return promotion_should_fail(&_promotion_failure_alot_count);
-}
-
-inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
-  if (PromotionFailureALot) {
-    _promotion_failure_alot_gc_number = total_collections();
-    *count = 0;
-  }
-}
-
-inline void CollectedHeap::reset_promotion_should_fail() {
-  reset_promotion_should_fail(&_promotion_failure_alot_count);
-}
-#endif  // #ifndef PRODUCT
-
 #endif // SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
--- a/src/hotspot/share/gc/shared/collectorPolicy.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/collectorPolicy.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,7 +26,7 @@
 #include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/generationSpec.hpp"
--- a/src/hotspot/share/gc/shared/gcArguments.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -24,74 +25,12 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/gcArguments.hpp"
-#include "gc/serial/serialArguments.hpp"
-#include "logging/log.hpp"
-#include "memory/allocation.inline.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
-#include "runtime/java.hpp"
-#include "runtime/os.hpp"
-#include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
 
-#if INCLUDE_ALL_GCS
-#include "gc/parallel/parallelArguments.hpp"
-#include "gc/cms/cmsArguments.hpp"
-#include "gc/g1/g1Arguments.hpp"
-#endif
-
-GCArguments* GCArguments::_instance = NULL;
-
-GCArguments* GCArguments::arguments() {
-  assert(is_initialized(), "Heap factory not yet created");
-  return _instance;
-}
-
-bool GCArguments::is_initialized() {
-  return _instance != NULL;
-}
-
-bool GCArguments::gc_selected() {
-#if INCLUDE_ALL_GCS
-  return UseSerialGC || UseParallelGC || UseParallelOldGC || UseConcMarkSweepGC || UseG1GC;
-#else
-  return UseSerialGC;
-#endif // INCLUDE_ALL_GCS
-}
-
-void GCArguments::select_gc() {
-  if (!gc_selected()) {
-    select_gc_ergonomically();
-    if (!gc_selected()) {
-      vm_exit_during_initialization("Garbage collector not selected (default collector explicitly disabled)", NULL);
-    }
-  }
-}
-
-void GCArguments::select_gc_ergonomically() {
-#if INCLUDE_ALL_GCS
-  if (os::is_server_class_machine()) {
-    FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
-  } else {
-    FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
-  }
-#else
-  UNSUPPORTED_OPTION(UseG1GC);
-  UNSUPPORTED_OPTION(UseParallelGC);
-  UNSUPPORTED_OPTION(UseParallelOldGC);
-  UNSUPPORTED_OPTION(UseConcMarkSweepGC);
-  FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
-#endif // INCLUDE_ALL_GCS
-}
-
-bool GCArguments::parse_verification_type(const char* type) {
-  log_warning(gc, verify)("VerifyGCType is not supported by this collector.");
-  // Return false to avoid multiple warnings.
-  return false;
-}
-
-void GCArguments::initialize_flags() {
+void GCArguments::initialize() {
 #if INCLUDE_ALL_GCS
   if (MinHeapFreeRatio == 100) {
     // Keeping the heap 100% free is hard ;-) so limit it to 99%.
@@ -105,52 +44,3 @@
   }
 #endif // INCLUDE_ALL_GCS
 }
-
-void GCArguments::post_heap_initialize() {
-  if (strlen(VerifyGCType) > 0) {
-    const char delimiter[] = " ,\n";
-    size_t length = strlen(VerifyGCType);
-    char* type_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
-    strncpy(type_list, VerifyGCType, length + 1);
-    char* token = strtok(type_list, delimiter);
-    while (token != NULL) {
-      bool success = parse_verification_type(token);
-      if (!success) {
-        break;
-      }
-      token = strtok(NULL, delimiter);
-    }
-    FREE_C_HEAP_ARRAY(char, type_list);
-  }
-}
-
-jint GCArguments::initialize() {
-  assert(!is_initialized(), "GC arguments already initialized");
-
-  select_gc();
-
-#if !INCLUDE_ALL_GCS
-  if (UseParallelGC || UseParallelOldGC) {
-    jio_fprintf(defaultStream::error_stream(), "UseParallelGC not supported in this VM.\n");
-    return JNI_ERR;
-  } else if (UseG1GC) {
-    jio_fprintf(defaultStream::error_stream(), "UseG1GC not supported in this VM.\n");
-    return JNI_ERR;
-  } else if (UseConcMarkSweepGC) {
-    jio_fprintf(defaultStream::error_stream(), "UseConcMarkSweepGC not supported in this VM.\n");
-    return JNI_ERR;
-#else
-  if (UseParallelGC || UseParallelOldGC) {
-    _instance = new ParallelArguments();
-  } else if (UseG1GC) {
-    _instance = new G1Arguments();
-  } else if (UseConcMarkSweepGC) {
-    _instance = new CMSArguments();
-#endif
-  } else if (UseSerialGC) {
-    _instance = new SerialArguments();
-  } else {
-    ShouldNotReachHere();
-  }
-  return JNI_OK;
-}
--- a/src/hotspot/share/gc/shared/gcArguments.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/gcArguments.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -29,35 +30,14 @@
 
 class CollectedHeap;
 
-class GCArguments : public CHeapObj<mtGC> {
-private:
-  static GCArguments* _instance;
-
-  static void select_gc();
-  static void select_gc_ergonomically();
-  static bool gc_selected();
-
+class GCArguments {
 protected:
   template <class Heap, class Policy>
   CollectedHeap* create_heap_with_policy();
 
 public:
-  static jint initialize();
-  static bool is_initialized();
-  static GCArguments* arguments();
-
-  void post_heap_initialize();
-
-  virtual void initialize_flags();
-
-  // Collector specific function to allow finer grained verification
-  // through VerifyGCType. If not overridden the default version will
-  // warn that the flag is not supported for the given collector.
-  // Returns true if parsing should continue, false otherwise.
-  virtual bool parse_verification_type(const char* type);
-
+  virtual void initialize();
   virtual size_t conservative_max_heap_alignment() = 0;
-
   virtual CollectedHeap* create_heap() = 0;
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/serial/serialArguments.hpp"
+#include "gc/shared/gcConfig.hpp"
+#include "runtime/java.hpp"
+#include "runtime/os.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/parallel/parallelArguments.hpp"
+#include "gc/cms/cmsArguments.hpp"
+#include "gc/g1/g1Arguments.hpp"
+#endif // INCLUDE_ALL_GCS
+
+struct SupportedGC {
+  bool&               _flag;
+  CollectedHeap::Name _name;
+  GCArguments&        _arguments;
+
+  SupportedGC(bool& flag, CollectedHeap::Name name, GCArguments& arguments) :
+      _flag(flag), _name(name), _arguments(arguments) {}
+};
+
+static SerialArguments   serialArguments;
+#if INCLUDE_ALL_GCS
+static ParallelArguments parallelArguments;
+static CMSArguments      cmsArguments;
+static G1Arguments       g1Arguments;
+#endif // INCLUDE_ALL_GCS
+
+// Table of supported GCs, for translating between command
+// line flag, CollectedHeap::Name and GCArguments instance.
+static const SupportedGC SupportedGCs[] = {
+  SupportedGC(UseSerialGC,        CollectedHeap::Serial,   serialArguments),
+#if INCLUDE_ALL_GCS
+  SupportedGC(UseParallelGC,      CollectedHeap::Parallel, parallelArguments),
+  SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel, parallelArguments),
+  SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS,      cmsArguments),
+  SupportedGC(UseG1GC,            CollectedHeap::G1,       g1Arguments),
+#endif // INCLUDE_ALL_GCS
+};
+
+GCArguments* GCConfig::_arguments = NULL;
+bool GCConfig::_gc_selected_ergonomically = false;
+
+void GCConfig::select_gc_ergonomically() {
+#if INCLUDE_ALL_GCS
+  if (os::is_server_class_machine()) {
+    FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
+  } else {
+    FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
+  }
+#else
+  UNSUPPORTED_OPTION(UseG1GC);
+  UNSUPPORTED_OPTION(UseParallelGC);
+  UNSUPPORTED_OPTION(UseParallelOldGC);
+  UNSUPPORTED_OPTION(UseConcMarkSweepGC);
+  FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
+#endif // INCLUDE_ALL_GCS
+}
+
+bool GCConfig::is_no_gc_selected() {
+  for (size_t i = 0; i < ARRAY_SIZE(SupportedGCs); i++) {
+    if (SupportedGCs[i]._flag) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+bool GCConfig::is_exactly_one_gc_selected() {
+  CollectedHeap::Name selected = CollectedHeap::None;
+
+  for (size_t i = 0; i < ARRAY_SIZE(SupportedGCs); i++) {
+    if (SupportedGCs[i]._flag) {
+      if (SupportedGCs[i]._name == selected || selected == CollectedHeap::None) {
+        // Selected
+        selected = SupportedGCs[i]._name;
+      } else {
+        // More than one selected
+        return false;
+      }
+    }
+  }
+
+  return selected != CollectedHeap::None;
+}
+
+GCArguments* GCConfig::select_gc() {
+  if (is_no_gc_selected()) {
+    // Try to select a GC ergonomically
+    select_gc_ergonomically();
+
+    if (is_no_gc_selected()) {
+      // Failed to select GC ergonomically
+      vm_exit_during_initialization("Garbage collector not selected "
+                                    "(default collector explicitly disabled)", NULL);
+    }
+
+    // Successfully selected a GC ergonomically
+    _gc_selected_ergonomically = true;
+  }
+
+  if (is_exactly_one_gc_selected()) {
+    // Exactly one GC selected
+    for (size_t i = 0; i < ARRAY_SIZE(SupportedGCs); i++) {
+      if (SupportedGCs[i]._flag) {
+        return &SupportedGCs[i]._arguments;
+      }
+    }
+  }
+
+  // More than one GC selected
+  vm_exit_during_initialization("Multiple garbage collectors selected", NULL);
+
+  return NULL;
+}
+
+void GCConfig::initialize() {
+  assert(_arguments == NULL, "Already initialized");
+  _arguments = select_gc();
+}
+
+bool GCConfig::is_gc_supported(CollectedHeap::Name name) {
+  for (size_t i = 0; i < ARRAY_SIZE(SupportedGCs); i++) {
+    if (SupportedGCs[i]._name == name) {
+      // Supported
+      return true;
+    }
+  }
+
+  // Not supported
+  return false;
+}
+
+bool GCConfig::is_gc_selected(CollectedHeap::Name name) {
+  for (size_t i = 0; i < ARRAY_SIZE(SupportedGCs); i++) {
+    if (SupportedGCs[i]._name == name && SupportedGCs[i]._flag) {
+      // Selected
+      return true;
+    }
+  }
+
+  // Not selected
+  return false;
+}
+
+bool GCConfig::is_gc_selected_ergonomically() {
+  return _gc_selected_ergonomically;
+}
+
+GCArguments* GCConfig::arguments() {
+  assert(_arguments != NULL, "Not initialized");
+  return _arguments;
+}
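The new gcConfig.cpp centralizes GC selection in a table that ties each
command-line flag to a CollectedHeap::Name and a GCArguments instance. A hedged
sketch of the intended startup sequence (the actual caller is not part of this
diff):

    GCConfig::initialize();                    // selects a GC, or exits the VM
    GCArguments* args = GCConfig::arguments();
    args->initialize();                        // collector-specific flag setup
    CollectedHeap* heap = args->create_heap();
    if (GCConfig::is_gc_selected_ergonomically()) {
      // The default collector was chosen, e.g. G1 on server-class machines.
    }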
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/gcConfig.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_GCCONFIG_HPP
+#define SHARE_GC_SHARED_GCCONFIG_HPP
+
+#include "gc/shared/collectedHeap.hpp"
+#include "memory/allocation.hpp"
+
+class GCArguments;
+
+class GCConfig : public AllStatic {
+private:
+  static GCArguments* _arguments;
+  static bool         _gc_selected_ergonomically;
+
+  static bool is_no_gc_selected();
+  static bool is_exactly_one_gc_selected();
+
+  static void select_gc_ergonomically();
+  static GCArguments* select_gc();
+
+public:
+  static void initialize();
+
+  static bool is_gc_supported(CollectedHeap::Name name);
+  static bool is_gc_selected(CollectedHeap::Name name);
+  static bool is_gc_selected_ergonomically();
+
+  static GCArguments* arguments();
+};
+
+#endif // SHARE_GC_SHARED_GCCONFIG_HPP
--- a/src/hotspot/share/gc/shared/gcLocker.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/gcLocker.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -24,10 +24,11 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "memory/resourceArea.hpp"
 #include "logging/log.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
 
@@ -85,6 +86,10 @@
   }
 }
 
+bool GCLocker::is_at_safepoint() {
+  return SafepointSynchronize::is_at_safepoint();
+}
+
 bool GCLocker::check_active_before_gc() {
   assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
   if (is_active() && !_needs_gc) {
@@ -145,87 +150,3 @@
     JNICritical_lock->notify_all();
   }
 }
-
-// Implementation of NoGCVerifier
-
-#ifdef ASSERT
-
-NoGCVerifier::NoGCVerifier(bool verifygc) {
-  _verifygc = verifygc;
-  if (_verifygc) {
-    CollectedHeap* h = Universe::heap();
-    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
-    _old_invocations = h->total_collections();
-  }
-}
-
-
-NoGCVerifier::~NoGCVerifier() {
-  if (_verifygc) {
-    CollectedHeap* h = Universe::heap();
-    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
-    if (_old_invocations != h->total_collections()) {
-      fatal("collection in a NoGCVerifier secured function");
-    }
-  }
-}
-
-PauseNoGCVerifier::PauseNoGCVerifier(NoGCVerifier * ngcv) {
-  _ngcv = ngcv;
-  if (_ngcv->_verifygc) {
-    // if we were verifying, then make sure that nothing is
-    // wrong before we "pause" verification
-    CollectedHeap* h = Universe::heap();
-    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
-    if (_ngcv->_old_invocations != h->total_collections()) {
-      fatal("collection in a NoGCVerifier secured function");
-    }
-  }
-}
-
-
-PauseNoGCVerifier::~PauseNoGCVerifier() {
-  if (_ngcv->_verifygc) {
-    // if we were verifying before, then reenable verification
-    CollectedHeap* h = Universe::heap();
-    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
-    _ngcv->_old_invocations = h->total_collections();
-  }
-}
-
-
-// JRT_LEAF rules:
-// A JRT_LEAF method may not interfere with safepointing by
-//   1) acquiring or blocking on a Mutex or JavaLock - checked
-//   2) allocating heap memory - checked
-//   3) executing a VM operation - checked
-//   4) executing a system call (including malloc) that could block or grab a lock
-//   5) invoking GC
-//   6) reaching a safepoint
-//   7) running too long
-// Nor may any method it calls.
-JRTLeafVerifier::JRTLeafVerifier()
-  : NoSafepointVerifier(true, JRTLeafVerifier::should_verify_GC())
-{
-}
-
-JRTLeafVerifier::~JRTLeafVerifier()
-{
-}
-
-bool JRTLeafVerifier::should_verify_GC() {
-  switch (JavaThread::current()->thread_state()) {
-  case _thread_in_Java:
-    // is in a leaf routine, there must be no safepoint.
-    return true;
-  case _thread_in_native:
-    // A native thread is not subject to safepoints.
-    // Even while it is in a leaf routine, GC is ok
-    return false;
-  default:
-    // Leaf routines cannot be called from other contexts.
-    ShouldNotReachHere();
-    return false;
-  }
-}
-#endif
--- a/src/hotspot/share/gc/shared/gcLocker.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/gcLocker.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,10 +25,11 @@
 #ifndef SHARE_VM_GC_SHARED_GCLOCKER_HPP
 #define SHARE_VM_GC_SHARED_GCLOCKER_HPP
 
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "memory/universe.hpp"
-#include "oops/oop.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+class JavaThread;
 
 // The direct lock/unlock calls do not force a collection if an unlock
 // decrements the count to zero. Avoid calling these if at all possible.
@@ -65,10 +66,13 @@
   }
 
   static void log_debug_jni(const char* msg);
+
+  static bool is_at_safepoint();
+
  public:
   // Accessors
   static bool is_active() {
-    assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
+    assert(GCLocker::is_at_safepoint(), "only read at safepoint");
     return is_active_internal();
   }
   static bool needs_gc()       { return _needs_gc;                        }
@@ -135,196 +139,10 @@
   // falls into the slow path, or is resumed from the safepoints in
   // the method, which only exist in the slow path. So when _needs_gc
   // is set, the slow path is always taken, till _needs_gc is cleared.
-  static void lock_critical(JavaThread* thread);
-  static void unlock_critical(JavaThread* thread);
+  inline static void lock_critical(JavaThread* thread);
+  inline static void unlock_critical(JavaThread* thread);
 
   static address needs_gc_address() { return (address) &_needs_gc; }
 };
 
-
-// A NoGCVerifier object can be placed in methods where one assumes that
-// no garbage collection will occur. The destructor will verify this property
-// unless the constructor is called with argument false (not verifygc).
-//
-// The check will only be done in debug mode and if verifygc true.
-
-class NoGCVerifier: public StackObj {
- friend class PauseNoGCVerifier;
-
- protected:
-  bool _verifygc;
-  unsigned int _old_invocations;
-
- public:
-#ifdef ASSERT
-  NoGCVerifier(bool verifygc = true);
-  ~NoGCVerifier();
-#else
-  NoGCVerifier(bool verifygc = true) {}
-  ~NoGCVerifier() {}
-#endif
-};
-
-// A PauseNoGCVerifier is used to temporarily pause the behavior
-// of a NoGCVerifier object. If we are not in debug mode or if the
-// NoGCVerifier object has a _verifygc value of false, then there
-// is nothing to do.
-
-class PauseNoGCVerifier: public StackObj {
- private:
-  NoGCVerifier * _ngcv;
-
- public:
-#ifdef ASSERT
-  PauseNoGCVerifier(NoGCVerifier * ngcv);
-  ~PauseNoGCVerifier();
-#else
-  PauseNoGCVerifier(NoGCVerifier * ngcv) {}
-  ~PauseNoGCVerifier() {}
-#endif
-};
-
-
-// A NoSafepointVerifier object will throw an assertion failure if
-// the current thread passes a possible safepoint while this object is
-// instantiated. A safepoint, will either be: an oop allocation, blocking
-// on a Mutex or JavaLock, or executing a VM operation.
-//
-// If StrictSafepointChecks is turned off, it degrades into a NoGCVerifier
-//
-class NoSafepointVerifier : public NoGCVerifier {
- friend class PauseNoSafepointVerifier;
-
- private:
-  bool _activated;
-  Thread *_thread;
- public:
-#ifdef ASSERT
-  NoSafepointVerifier(bool activated = true, bool verifygc = true ) :
-    NoGCVerifier(verifygc),
-    _activated(activated) {
-    _thread = Thread::current();
-    if (_activated) {
-      _thread->_allow_allocation_count++;
-      _thread->_allow_safepoint_count++;
-    }
-  }
-
-  ~NoSafepointVerifier() {
-    if (_activated) {
-      _thread->_allow_allocation_count--;
-      _thread->_allow_safepoint_count--;
-    }
-  }
-#else
-  NoSafepointVerifier(bool activated = true, bool verifygc = true) : NoGCVerifier(verifygc){}
-  ~NoSafepointVerifier() {}
-#endif
-};
-
-// A PauseNoSafepointVerifier is used to temporarily pause the
-// behavior of a NoSafepointVerifier object. If we are not in debug
-// mode then there is nothing to do. If the NoSafepointVerifier
-// object has an _activated value of false, then there is nothing to
-// do for safepoint and allocation checking, but there may still be
-// something to do for the underlying NoGCVerifier object.
-
-class PauseNoSafepointVerifier : public PauseNoGCVerifier {
- private:
-  NoSafepointVerifier * _nsv;
-
- public:
-#ifdef ASSERT
-  PauseNoSafepointVerifier(NoSafepointVerifier * nsv)
-    : PauseNoGCVerifier(nsv) {
-
-    _nsv = nsv;
-    if (_nsv->_activated) {
-      _nsv->_thread->_allow_allocation_count--;
-      _nsv->_thread->_allow_safepoint_count--;
-    }
-  }
-
-  ~PauseNoSafepointVerifier() {
-    if (_nsv->_activated) {
-      _nsv->_thread->_allow_allocation_count++;
-      _nsv->_thread->_allow_safepoint_count++;
-    }
-  }
-#else
-  PauseNoSafepointVerifier(NoSafepointVerifier * nsv)
-    : PauseNoGCVerifier(nsv) {}
-  ~PauseNoSafepointVerifier() {}
-#endif
-};
-
-// A SkipGCALot object is used to elide the usual effect of gc-a-lot
-// over a section of execution by a thread. Currently, it's used only to
-// prevent re-entrant calls to GC.
-class SkipGCALot : public StackObj {
-  private:
-   bool _saved;
-   Thread* _t;
-
-  public:
-#ifdef ASSERT
-    SkipGCALot(Thread* t) : _t(t) {
-      _saved = _t->skip_gcalot();
-      _t->set_skip_gcalot(true);
-    }
-
-    ~SkipGCALot() {
-      assert(_t->skip_gcalot(), "Save-restore protocol invariant");
-      _t->set_skip_gcalot(_saved);
-    }
-#else
-    SkipGCALot(Thread* t) { }
-    ~SkipGCALot() { }
-#endif
-};
-
-// JRT_LEAF currently can be called from either _thread_in_Java or
-// _thread_in_native mode. In _thread_in_native, it is ok
-// for another thread to trigger GC. The rest of the JRT_LEAF
-// rules apply.
-class JRTLeafVerifier : public NoSafepointVerifier {
-  static bool should_verify_GC();
- public:
-#ifdef ASSERT
-  JRTLeafVerifier();
-  ~JRTLeafVerifier();
-#else
-  JRTLeafVerifier() {}
-  ~JRTLeafVerifier() {}
-#endif
-};
-
-// A NoAllocVerifier object can be placed in methods where one assumes that
-// no allocation will occur. The destructor will verify this property
-// unless the constructor is called with argument false (not activated).
-//
-// The check will only be done in debug mode and if activated.
-// Note: this only makes sense at safepoints (otherwise, other threads may
-// allocate concurrently.)
-
-class NoAllocVerifier : public StackObj {
- private:
-  bool  _activated;
-
- public:
-#ifdef ASSERT
-  NoAllocVerifier(bool activated = true) {
-    _activated = activated;
-    if (_activated) Thread::current()->_allow_allocation_count++;
-  }
-
-  ~NoAllocVerifier() {
-    if (_activated) Thread::current()->_allow_allocation_count--;
-  }
-#else
-  NoAllocVerifier(bool activated = true) {}
-  ~NoAllocVerifier() {}
-#endif
-};
-
 #endif // SHARE_VM_GC_SHARED_GCLOCKER_HPP
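The verifier classes (NoGCVerifier, NoSafepointVerifier, JRTLeafVerifier, and
friends) are deleted here; the linkResolver.cpp hunk below starts including
runtime/safepointVerifiers.hpp, which is where this changeset relocates them. A
hedged usage sketch, assuming the relocated class keeps its default constructor
arguments:

    #include "runtime/safepointVerifiers.hpp"  // new home of the verifiers

    void must_not_safepoint() {
      NoSafepointVerifier nsv;  // asserts in debug builds if a safepoint is reached
      // ... code that may not block, allocate, or run a VM operation ...
    }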
--- a/src/hotspot/share/gc/shared/gcLocker.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/gcLocker.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,8 +26,9 @@
 #define SHARE_VM_GC_SHARED_GCLOCKER_INLINE_HPP
 
 #include "gc/shared/gcLocker.hpp"
+#include "runtime/thread.hpp"
 
-inline void GCLocker::lock_critical(JavaThread* thread) {
+void GCLocker::lock_critical(JavaThread* thread) {
   if (!thread->in_critical()) {
     if (needs_gc()) {
       // jni_lock call calls enter_critical under the lock so that the
@@ -40,7 +41,7 @@
   thread->enter_critical();
 }
 
-inline void GCLocker::unlock_critical(JavaThread* thread) {
+void GCLocker::unlock_critical(JavaThread* thread) {
   if (thread->in_last_critical()) {
     if (needs_gc()) {
       // jni_unlock call calls exit_critical under the lock so that
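lock_critical()/unlock_critical() keep their definitions here but are now
declared inline in the class (see the gcLocker.hpp hunk above), so callers must
include the .inline.hpp. A hedged sketch of the protocol around a
Get*Critical/Release*Critical pair:

    #include "gc/shared/gcLocker.inline.hpp"

    GCLocker::lock_critical(thread);    // enter the JNI critical region
    // ... access the raw array or string contents ...
    GCLocker::unlock_critical(thread);  // may allow a pending GC to proceed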
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -36,7 +36,7 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
@@ -1232,8 +1232,8 @@
 GenCollectedHeap* GenCollectedHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
-  assert(heap->kind() == CollectedHeap::SerialHeap ||
-         heap->kind() == CollectedHeap::CMSHeap, "Not a GenCollectedHeap");
+  assert(heap->kind() == CollectedHeap::Serial ||
+         heap->kind() == CollectedHeap::CMS, "Invalid name");
   return (GenCollectedHeap*) heap;
 }
 
--- a/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -31,6 +31,8 @@
 #include "gc/shared/genOopClosures.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 inline OopsInGenClosure::OopsInGenClosure(Generation* gen) :
   ExtendedOopClosure(gen->ref_processor()), _orig_gen(gen), _rs(NULL) {
@@ -48,9 +50,9 @@
 
 template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < _gen_boundary) {
     _rs->inline_write_ref_field_gc(p, obj);
@@ -59,9 +61,9 @@
 
 template <class T> inline void OopsInGenClosure::par_do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < gen_boundary()) {
     rs()->write_ref_field_gc_par(p, obj);
@@ -78,15 +80,15 @@
 // NOTE! Any changes made here should also be made
 // in FastScanClosure::do_oop_work()
 template <class T> inline void ScanClosure::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
   // Should we copy the obj?
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
 
     if (is_scanning_a_cld()) {
@@ -104,15 +106,15 @@
 // NOTE! Any changes made here should also be made
 // in ScanClosure::do_oop_work()
 template <class T> inline void FastScanClosure::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
   // Should we copy the obj?
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
       if (is_scanning_a_cld()) {
         do_cld_barrier();
       } else if (_gc_barrier) {
@@ -127,9 +129,9 @@
 inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 
 template <class T> void FilteringClosure::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       _cl->do_oop(p);
     }
@@ -142,14 +144,13 @@
 // Note similarity to ScanClosure; the difference is that
 // the barrier set is taken care of outside this closure.
 template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {
-  assert(!oopDesc::is_null(*p), "null weak reference?");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // weak references are sometimes scanned twice; must check
   // that to-space doesn't already contain this object
   if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
     oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                       : _g->copy_to_survivor_space(obj);
-    oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+    RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
   }
 }
 
--- a/src/hotspot/share/gc/shared/generation.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/generation.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,7 @@
 #include "gc/shared/blockOffsetTable.inline.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,6 +27,7 @@
 
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/modRefBarrierSet.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.hpp"
@@ -105,7 +106,7 @@
     T* end = from + length;
     for (T* p = dst; from < end; from++, p++) {
       T element = *from;
-      if (bound->is_instanceof_or_null(element)) {
+      if (oopDesc::is_instanceof_or_null(CompressedOops::decode(element), bound)) {
         bs->template write_ref_field_pre<decorators>(p);
         *p = element;
       } else {
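Klass::is_instanceof_or_null(element) is replaced by a static oopDesc check on
the decoded element. A hedged restatement of the new loop body (element and
bound come from the surrounding hunk):

    T element = *from;
    // NULL-safe decode; element is an oop or narrowOop depending on T.
    oop elem_obj = CompressedOops::decode(element);
    if (oopDesc::is_instanceof_or_null(elem_obj, bound)) {
      // ... store element through the pre-write barrier ...
    }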
--- a/src/hotspot/share/gc/shared/oopStorage.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -120,12 +120,6 @@
 const unsigned section_count = BytesPerWord;
 const unsigned block_alignment = sizeof(oop) * section_size;
 
-// VS2013 warns (C4351) that elements of _data will be *correctly* default
-// initialized, unlike earlier versions that *incorrectly* did not do so.
-#ifdef _WINDOWS
-#pragma warning(push)
-#pragma warning(disable: 4351)
-#endif // _WINDOWS
 OopStorage::Block::Block(const OopStorage* owner, void* memory) :
   _data(),
   _allocated_bitmask(0),
@@ -142,9 +136,6 @@
   assert(owner != NULL, "NULL owner");
   assert(is_aligned(this, block_alignment), "misaligned block");
 }
-#ifdef _WINDOWS
-#pragma warning(pop)
-#endif
 
 OopStorage::Block::~Block() {
   assert(_release_refcount == 0, "deleting block while releasing");
--- a/src/hotspot/share/gc/shared/referenceProcessor.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/referenceProcessor.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,17 +26,18 @@
 #define SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_INLINE_HPP
 
 #include "gc/shared/referenceProcessor.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 
 oop DiscoveredList::head() const {
-  return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
+  return UseCompressedOops ?  CompressedOops::decode(_compressed_head) :
     _oop_head;
 }
 
 void DiscoveredList::set_head(oop o) {
   if (UseCompressedOops) {
     // Must compress the head ptr.
-    _compressed_head = oopDesc::encode_heap_oop(o);
+    _compressed_head = CompressedOops::encode(o);
   } else {
     _oop_head = o;
   }
--- a/src/hotspot/share/gc/shared/space.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/space.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -145,6 +145,9 @@
   bool is_in(const void* p) const {
     return used_region().contains(p);
   }
+  bool is_in(oop obj) const {
+    return is_in((void*)obj);
+  }
 
   // Returns true iff the given reserved memory of the space contains the
   // given address.
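The new overload spares callers a cast. A one-line hedged example (space and
obj are illustrative names):

    if (space->is_in(obj)) { /* ... */ }  // previously: space->is_in((void*)obj)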
--- a/src/hotspot/share/gc/shared/vmGCOperations.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/gc/shared/vmGCOperations.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,7 @@
 #include "classfile/javaClasses.hpp"
 #include "gc/shared/allocTracer.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 #include "interpreter/oopMapCache.hpp"
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,6 +25,7 @@
 // no precompiled headers
 #include "classfile/vmSymbols.hpp"
 #include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
 #include "interpreter/bytecodeInterpreter.inline.hpp"
@@ -2434,7 +2435,7 @@
                   handle_exception);
           result = THREAD->vm_result();
         }
-        if (result == Universe::the_null_sentinel())
+        if (oopDesc::equals(result, Universe::the_null_sentinel()))
           result = NULL;
 
         VERIFY_OOP(result);
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -65,6 +65,7 @@
 #include "runtime/synchronizer.hpp"
 #include "runtime/threadCritical.hpp"
 #include "utilities/align.hpp"
+#include "utilities/copy.hpp"
 #include "utilities/events.hpp"
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
@@ -207,7 +208,7 @@
     if (rindex >= 0) {
       oop coop = m->constants()->resolved_references()->obj_at(rindex);
       oop roop = (result == NULL ? Universe::the_null_sentinel() : result);
-      assert(roop == coop, "expected result for assembly code");
+      assert(oopDesc::equals(roop, coop), "expected result for assembly code");
     }
   }
 #endif
--- a/src/hotspot/share/interpreter/invocationCounter.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/interpreter/invocationCounter.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_INTERPRETER_INVOCATIONCOUNTER_HPP
 #define SHARE_VM_INTERPRETER_INVOCATIONCOUNTER_HPP
 
-#include "memory/allocation.hpp"
 #include "runtime/handles.hpp"
 #include "utilities/exceptions.hpp"
 
--- a/src/hotspot/share/interpreter/linkResolver.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/interpreter/linkResolver.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -32,7 +32,6 @@
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecode.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/linkResolver.hpp"
@@ -53,6 +52,7 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/reflection.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
--- a/src/hotspot/share/interpreter/rewriter.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/interpreter/rewriter.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/rewriter.hpp"
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -43,6 +43,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/safepointMechanism.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "utilities/align.hpp"
 
 // frequently used constants
@@ -634,7 +635,7 @@
 
   if (!compiled_code->is_a(HotSpotCompiledNmethod::klass())) {
     oop stubName = HotSpotCompiledCode::name(compiled_code_obj);
-    if (oopDesc::is_null(stubName)) {
+    if (stubName == NULL) {
       JVMCI_ERROR_OK("stub should have a name");
     }
     char* name = strdup(java_lang_String::as_utf8_string(stubName));
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,6 +25,7 @@
 #include "ci/ciUtilities.inline.hpp"
 #include "classfile/javaClasses.inline.hpp"
 #include "code/scopeDesc.hpp"
+#include "interpreter/linkResolver.hpp"
 #include "memory/oopFactory.hpp"
 #include "oops/cpCache.inline.hpp"
 #include "oops/generateOopMap.hpp"
--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,6 +29,7 @@
 #include "jvmci/jvmciCompilerToVM.hpp"
 #include "jvmci/vmStructs_jvmci.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "utilities/resourceHash.hpp"
 
 
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -680,6 +680,20 @@
 #define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
   volatile_nonstatic_field(JavaFrameAnchor, _last_Java_fp, intptr_t*)
 
+#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
+  declare_constant(VM_Version::CPU_FP)                  \
+  declare_constant(VM_Version::CPU_ASIMD)               \
+  declare_constant(VM_Version::CPU_EVTSTRM)             \
+  declare_constant(VM_Version::CPU_AES)                 \
+  declare_constant(VM_Version::CPU_PMULL)               \
+  declare_constant(VM_Version::CPU_SHA1)                \
+  declare_constant(VM_Version::CPU_SHA2)                \
+  declare_constant(VM_Version::CPU_CRC32)               \
+  declare_constant(VM_Version::CPU_LSE)                 \
+  declare_constant(VM_Version::CPU_STXR_PREFETCH)       \
+  declare_constant(VM_Version::CPU_A53MAC)              \
+  declare_constant(VM_Version::CPU_DMB_ATOMICS)
+
 #endif
 
 
--- a/src/hotspot/share/logging/log.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/logging/log.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,7 +28,6 @@
 #include "logging/logPrefix.hpp"
 #include "logging/logTagSet.hpp"
 #include "logging/logTag.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/os.hpp"
 #include "utilities/debug.hpp"
 
--- a/src/hotspot/share/logging/logDecorations.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/logging/logDecorations.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,7 +26,6 @@
 
 #include "logging/logDecorators.hpp"
 #include "logging/logTagSet.hpp"
-#include "memory/allocation.hpp"
 
 // Temporary object containing the necessary data for a log call's decorations (timestamps, etc).
 class LogDecorations {
--- a/src/hotspot/share/logging/logDecorators.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/logging/logDecorators.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -24,7 +24,6 @@
 #ifndef SHARE_VM_LOGGING_LOGDECORATORS_HPP
 #define SHARE_VM_LOGGING_LOGDECORATORS_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 // The list of available decorators:
--- a/src/hotspot/share/logging/logPrefix.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/logging/logPrefix.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,6 +72,7 @@
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, plab)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, region)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, remset)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, remset, tracking)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref, start)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, start)) \
--- a/src/hotspot/share/logging/logTag.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/logging/logTag.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -124,7 +124,6 @@
   LOG_TAG(resolve) \
   LOG_TAG(safepoint) \
   LOG_TAG(scavenge) \
-  LOG_TAG(scrub) \
   LOG_TAG(smr) \
   LOG_TAG(stacktrace) \
   LOG_TAG(stackwalk) \
@@ -145,6 +144,7 @@
   LOG_TAG(tlab) \
   LOG_TAG(time) \
   LOG_TAG(timer) \
+  LOG_TAG(tracking) \
   LOG_TAG(update) \
   LOG_TAG(unload) /* Trace unloading of classes */ \
   LOG_TAG(unshareable) \
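With the tracking tag added here and the gc+remset+tracking prefix registered
in logPrefix.hpp above, remembered-set tracking can log under the new tag set.
A hedged example (the message text is illustrative):

    log_debug(gc, remset, tracking)("Enabled remset tracking for region %u", region_idx);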
--- a/src/hotspot/share/memory/allocation.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/allocation.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -549,7 +549,7 @@
   static size_t size_for(size_t length);
 
   static E* allocate(size_t length, MEMFLAGS flags);
-  static void free(E* addr, size_t length);
+  static void free(E* addr);
 };
 
 #endif // SHARE_VM_MEMORY_ALLOCATION_HPP
--- a/src/hotspot/share/memory/allocation.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/allocation.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -105,7 +105,7 @@
 }
 
 template<class E>
-void MallocArrayAllocator<E>::free(E* addr, size_t /*length*/) {
+void MallocArrayAllocator<E>::free(E* addr) {
   FreeHeap(addr);
 }
 
@@ -152,7 +152,7 @@
 
 template<class E>
 void ArrayAllocator<E>::free_malloc(E* addr, size_t length) {
-  MallocArrayAllocator<E>::free(addr, length);
+  MallocArrayAllocator<E>::free(addr);
 }
 
 template<class E>
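
The allocation.hpp and allocation.inline.hpp hunks above drop the unused length argument from MallocArrayAllocator<E>::free: releasing a malloc'ed block needs only the pointer, since the underlying allocator tracks the block size itself. The same shape as a standalone sketch, with std::malloc/std::free standing in for HotSpot's AllocateHeap/FreeHeap:

    #include <cstdlib>
    #include <cstddef>

    template <class E>
    struct MallocArrayAllocatorSketch {
      static E* allocate(size_t length) {
        return static_cast<E*>(std::malloc(length * sizeof(E)));
      }
      // No length parameter: the allocator already knows the block size.
      static void free(E* addr) {
        std::free(addr);
      }
    };

    int main() {
      int* a = MallocArrayAllocatorSketch<int>::allocate(16);
      MallocArrayAllocatorSketch<int>::free(a);
      return 0;
    }
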
--- a/src/hotspot/share/memory/filemap.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/filemap.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -31,9 +31,6 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/altHashing.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1CollectedHeap.hpp"
-#endif
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "logging/logMessage.hpp"
@@ -42,6 +39,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/arguments.hpp"
@@ -51,6 +49,9 @@
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CollectedHeap.hpp"
+#endif
 
 # include <sys/stat.h>
 # include <errno.h>
@@ -468,7 +469,7 @@
   if (MetaspaceShared::is_heap_region(region)) {
     assert((base - (char*)Universe::narrow_oop_base()) % HeapWordSize == 0, "Sanity");
     if (base != NULL) {
-      si->_addr._offset = (intx)oopDesc::encode_heap_oop_not_null((oop)base);
+      si->_addr._offset = (intx)CompressedOops::encode_not_null((oop)base);
     } else {
       si->_addr._offset = 0;
     }
@@ -783,7 +784,7 @@
     size_t used = si->_used;
     if (used > 0) {
       size_t size = used;
-      char* requested_addr = (char*)((void*)oopDesc::decode_heap_oop_not_null(
+      char* requested_addr = (char*)((void*)CompressedOops::decode_not_null(
                                             (narrowOop)si->_addr._offset));
       regions[region_num] = MemRegion((HeapWord*)requested_addr, size / HeapWordSize);
       region_num ++;
@@ -964,7 +965,7 @@
 char* FileMapInfo::FileMapHeader::region_addr(int idx) {
   if (MetaspaceShared::is_heap_region(idx)) {
     return _space[idx]._used > 0 ?
-             (char*)((void*)oopDesc::decode_heap_oop_not_null((narrowOop)_space[idx]._addr._offset)) : NULL;
+             (char*)((void*)CompressedOops::decode_not_null((narrowOop)_space[idx]._addr._offset)) : NULL;
   } else {
     return _space[idx]._addr._base;
   }
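
The filemap.cpp hunks above route the encoding of archived heap-region addresses through the new CompressedOops API instead of the oopDesc statics. The transform itself is base-plus-shifted-offset arithmetic; a simplified sketch with a hypothetical fixed base and shift, rather than HotSpot's runtime-selected encoding mode:

    #include <cstdint>
    #include <cassert>

    const uintptr_t kHeapBase = 0x40000000;  // hypothetical narrow-oop base
    const int       kShift    = 3;           // log2 of 8-byte object alignment

    static uint32_t encode_not_null(uintptr_t addr) {
      assert(addr != 0 && addr >= kHeapBase);
      return (uint32_t)((addr - kHeapBase) >> kShift);
    }

    static uintptr_t decode_not_null(uint32_t narrow) {
      assert(narrow != 0);
      return kHeapBase + ((uintptr_t)narrow << kShift);
    }

    int main() {
      uintptr_t p = kHeapBase + 0x1000;
      assert(decode_not_null(encode_not_null(p)) == p);  // round-trips
      return 0;
    }
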
--- a/src/hotspot/share/memory/heap.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/heap.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -129,8 +129,6 @@
 
   // Iteration helpers
   void*      next_used(HeapBlock* b) const;
-  HeapBlock* first_block() const;
-  HeapBlock* next_block(HeapBlock* b) const;
   HeapBlock* block_start(void* p) const;
 
   // to perform additional actions on creation of executable code
@@ -179,6 +177,12 @@
   size_t alignment_offset()     const;           // offset of first byte of any block, within the enclosing alignment unit
   static size_t header_size();                   // returns the header size for each heap block
 
+  size_t segment_size()         const { return _segment_size; }  // for CodeHeapState
+  HeapBlock* first_block() const;                                // for CodeHeapState
+  HeapBlock* next_block(HeapBlock* b) const;                     // for CodeHeapState
+
+  FreeBlock* freelist()         const { return _freelist; }      // for CodeHeapState
+
   size_t allocated_in_freelist() const           { return _freelist_segments * CodeCacheSegmentSize; }
   int    freelist_length()       const           { return _freelist_length; } // number of elements in the freelist
 
--- a/src/hotspot/share/memory/iterator.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/iterator.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,6 +27,8 @@
 
 #include "classfile/classLoaderData.hpp"
 #include "memory/iterator.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
@@ -52,9 +54,9 @@
 template <typename T>
 void ExtendedOopClosure::verify(T* p) {
   if (should_verify_oops()) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop o = CompressedOops::decode_not_null(heap_oop);
       assert(Universe::heap()->is_in_closed_subset(o),
              "should be in closed *p " PTR_FORMAT " " PTR_FORMAT, p2i(p), p2i(o));
     }
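
The verify hunk above re-expresses the oop check with RawAccess<>::oop_load plus CompressedOops::is_null/decode_not_null, so a single template body serves both oop* and narrowOop* fields. Reduced to a standalone sketch over two slot widths (the helpers are stand-ins, not the HotSpot calls):

    #include <cstdint>
    #include <cassert>

    // Load raw, then test for null in the slot's own representation;
    // decoding to a full pointer would follow only in the non-null case.
    template <typename T>
    bool slot_is_set(T* p) {
      T v = *p;                       // stand-in for RawAccess<>::oop_load(p)
      return v != static_cast<T>(0);  // stand-in for !CompressedOops::is_null(v)
    }

    int main() {
      uint32_t narrow_slot = 42;   // narrowOop-like slot
      uint64_t wide_slot   = 0;    // oop-like slot
      assert(slot_is_set(&narrow_slot));
      assert(!slot_is_set(&wide_slot));
      return 0;
    }
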
--- a/src/hotspot/share/memory/metachunk.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/metachunk.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -24,7 +24,6 @@
 #ifndef SHARE_VM_MEMORY_METACHUNK_HPP
 #define SHARE_VM_MEMORY_METACHUNK_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
--- a/src/hotspot/share/memory/metaspace.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/metaspace.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,6 @@
 #include "aot/aotLoader.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.hpp"
@@ -1261,11 +1260,6 @@
   // the class loader using the SpaceManager is collected.
   BlockFreelist* _block_freelists;
 
-  // protects virtualspace and chunk expansions
-  static const char*  _expand_lock_name;
-  static const int    _expand_lock_rank;
-  static Mutex* const _expand_lock;
-
  private:
   // Accessors
   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
@@ -1331,8 +1325,6 @@
 
   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 
-  static Mutex* expand_lock() { return _expand_lock; }
-
   // Increment the per Metaspace and global running sums for Metachunks
   // by the given size.  This is used when a Metachunk is added to
   // the in-use list.
@@ -1416,22 +1408,13 @@
 uint const SpaceManager::_small_chunk_limit = 4;
 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
 
-const char* SpaceManager::_expand_lock_name =
-  "SpaceManager chunk allocation lock";
-const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
-Mutex* const SpaceManager::_expand_lock =
-  new Mutex(SpaceManager::_expand_lock_rank,
-            SpaceManager::_expand_lock_name,
-            Mutex::_allow_vm_block_flag,
-            Monitor::_safepoint_check_never);
-
 void VirtualSpaceNode::inc_container_count() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _container_count++;
 }
 
 void VirtualSpaceNode::dec_container_count() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _container_count--;
 }
 
@@ -1731,7 +1714,7 @@
 }
 
 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   Metachunk* result = take_from_committed(chunk_word_size);
   return result;
 }
@@ -1811,11 +1794,11 @@
 }
 
 void VirtualSpaceList::inc_reserved_words(size_t v) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _reserved_words = _reserved_words + v;
 }
 void VirtualSpaceList::dec_reserved_words(size_t v) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _reserved_words = _reserved_words - v;
 }
 
@@ -1826,24 +1809,24 @@
           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
 
 void VirtualSpaceList::inc_committed_words(size_t v) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _committed_words = _committed_words + v;
 
   assert_committed_below_limit();
 }
 void VirtualSpaceList::dec_committed_words(size_t v) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _committed_words = _committed_words - v;
 
   assert_committed_below_limit();
 }
 
 void VirtualSpaceList::inc_virtual_space_count() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _virtual_space_count++;
 }
 void VirtualSpaceList::dec_virtual_space_count() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _virtual_space_count--;
 }
 
@@ -1861,7 +1844,7 @@
 }
 
 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   assert(chunk != NULL, "invalid chunk pointer");
   // Check for valid merge combinations.
   assert((chunk->get_chunk_type() == SpecializedIndex &&
@@ -1994,7 +1977,7 @@
 // the node from their respective freelists.
 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   // Don't use a VirtualSpaceListIterator because this
   // list is being changed and a straightforward use of an iterator is not safe.
   VirtualSpaceNode* purged_vsl = NULL;
@@ -2058,7 +2041,7 @@
 }
 
 void VirtualSpaceList::retire_current_virtual_space() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
 
   VirtualSpaceNode* vsn = current_virtual_space();
 
@@ -2100,7 +2083,7 @@
                                    _reserved_words(0),
                                    _committed_words(0),
                                    _virtual_space_count(0) {
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);
   create_new_virtual_space(word_size);
 }
@@ -2112,7 +2095,7 @@
                                    _reserved_words(0),
                                    _committed_words(0),
                                    _virtual_space_count(0) {
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);
   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
   bool succeeded = class_entry->initialize();
@@ -2127,7 +2110,7 @@
 
 // Allocate another meta virtual space and add it to the list.
 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
 
   if (is_class()) {
     assert(false, "We currently don't support more than one VirtualSpace for"
@@ -2616,14 +2599,14 @@
 
 // Update internal accounting after a chunk was added
 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   _free_chunks_count ++;
   _free_chunks_total += c->word_size();
 }
 
 // Update internal accounting after a chunk was removed
 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   assert(_free_chunks_count >= 1,
     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
   assert(_free_chunks_total >= c->word_size(),
@@ -2635,8 +2618,8 @@
 
 size_t ChunkManager::free_chunks_count() {
 #ifdef ASSERT
-  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
-    MutexLockerEx cl(SpaceManager::expand_lock(),
+  if (!UseConcMarkSweepGC && !MetaspaceExpand_lock->is_locked()) {
+    MutexLockerEx cl(MetaspaceExpand_lock,
                      Mutex::_no_safepoint_check_flag);
     // This lock is only needed in debug because the verification
     // of the _free_chunks_totals walks the list of free chunks
@@ -2657,7 +2640,7 @@
 }
 
 void ChunkManager::locked_verify_free_chunks_total() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   assert(sum_free_chunks() == _free_chunks_total,
          "_free_chunks_total " SIZE_FORMAT " is not the"
          " same as sum " SIZE_FORMAT, _free_chunks_total,
@@ -2665,13 +2648,13 @@
 }
 
 void ChunkManager::verify_free_chunks_total() {
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                      Mutex::_no_safepoint_check_flag);
   locked_verify_free_chunks_total();
 }
 
 void ChunkManager::locked_verify_free_chunks_count() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   assert(sum_free_chunks_count() == _free_chunks_count,
          "_free_chunks_count " SIZE_FORMAT " is not the"
          " same as sum " SIZE_FORMAT, _free_chunks_count,
@@ -2680,14 +2663,14 @@
 
 void ChunkManager::verify_free_chunks_count() {
 #ifdef ASSERT
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                      Mutex::_no_safepoint_check_flag);
   locked_verify_free_chunks_count();
 #endif
 }
 
 void ChunkManager::verify() {
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                      Mutex::_no_safepoint_check_flag);
   locked_verify();
 }
@@ -2709,13 +2692,13 @@
 }
 
 void ChunkManager::locked_print_free_chunks(outputStream* st) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
                 _free_chunks_total, _free_chunks_count);
 }
 
 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
                 sum_free_chunks(), sum_free_chunks_count());
 }
@@ -2730,7 +2713,7 @@
 // These methods that sum the free chunk lists are used in printing
 // methods that are used in product builds.
 size_t ChunkManager::sum_free_chunks() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   size_t result = 0;
   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
     ChunkList* list = free_chunks(i);
@@ -2746,7 +2729,7 @@
 }
 
 size_t ChunkManager::sum_free_chunks_count() {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   size_t count = 0;
   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
     ChunkList* list = free_chunks(i);
@@ -2862,7 +2845,7 @@
 }
 
 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
 
   slow_locked_verify();
 
@@ -2969,7 +2952,7 @@
 }
 
 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   slow_locked_verify();
 
   // Take from the beginning of the list
@@ -3001,7 +2984,7 @@
 }
 
 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   DEBUG_ONLY(do_verify_chunk(chunk);)
   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
   assert(chunk != NULL, "Expected chunk.");
@@ -3090,7 +3073,7 @@
 }
 
 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
     stat->num_by_type[i] = num_free_chunks(i);
     stat->single_size_by_type[i] = size_by_index(i);
@@ -3101,7 +3084,7 @@
 }
 
 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);
   locked_get_statistics(stat);
 }
@@ -3400,7 +3383,7 @@
   assert(current_chunk() == NULL ||
          current_chunk()->allocate(word_size) == NULL,
          "Don't need to expand");
-  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
 
   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
     size_t words_left = 0;
@@ -3469,7 +3452,7 @@
 }
 
 void SpaceManager::inc_size_metrics(size_t words) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   // Total of allocated Metachunks and allocated Metachunks count
   // for each SpaceManager
   _allocated_chunks_words = _allocated_chunks_words + words;
@@ -3508,13 +3491,13 @@
 }
 
 SpaceManager::~SpaceManager() {
-  // This call this->_lock which can't be done while holding expand_lock()
+  // The assert below takes this->_lock, which can't be done while holding MetaspaceExpand_lock
   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
          " allocated_chunks_words() " SIZE_FORMAT,
          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
 
-  MutexLockerEx fcl(SpaceManager::expand_lock(),
+  MutexLockerEx fcl(MetaspaceExpand_lock,
                     Mutex::_no_safepoint_check_flag);
 
   assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
@@ -3779,7 +3762,7 @@
 }
 
 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   assert(words <= capacity_words(mdtype),
          "About to decrement below 0: words " SIZE_FORMAT
          " is greater than _capacity_words[%u] " SIZE_FORMAT,
@@ -3788,7 +3771,7 @@
 }
 
 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
-  assert_lock_strong(SpaceManager::expand_lock());
+  assert_lock_strong(MetaspaceExpand_lock);
   // Needs to be atomic
   _capacity_words[mdtype] += words;
 }
@@ -3799,7 +3782,7 @@
          " is greater than _used_words[%u] " SIZE_FORMAT,
          words, mdtype, used_words(mdtype));
   // For CMS deallocation of the Metaspaces occurs during the
-  // sweep which is a concurrent phase.  Protection by the expand_lock()
+  // sweep which is a concurrent phase.  Protection by the MetaspaceExpand_lock
   // is not enough since allocation is on a per Metaspace basis
   // and protected by the Metaspace lock.
   Atomic::sub(words, &_used_words[mdtype]);
@@ -4228,7 +4211,7 @@
 
 // Prints an ASCII representation of the given space.
 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
-  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
   const bool for_class = mdtype == Metaspace::ClassType ? true : false;
   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
   if (vsl != NULL) {
@@ -4680,17 +4663,13 @@
   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
 
   if (result == NULL) {
-    if (DumpSharedSpaces && THREAD->is_VM_thread()) {
-      tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
-          MetaspaceObj::type_name(type), word_size * BytesPerWord);
-      vm_exit(1);
-    }
-
     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
 
     // Allocation failed.
-    if (is_init_completed()) {
+    if (is_init_completed() && !(DumpSharedSpaces && THREAD->is_VM_thread())) {
       // Only start a GC if the bootstrapping has completed.
+      // Also, we cannot GC if we are at the end of the CDS dumping stage which runs inside
+      // the VM thread.
 
       // Try to clean out some memory and retry.
       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
@@ -4698,6 +4677,14 @@
   }
 
   if (result == NULL) {
+    if (DumpSharedSpaces) {
+      // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
+      // We should abort to avoid generating a potentially bad archive.
+      tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
+          MetaspaceObj::type_name(type), word_size * BytesPerWord);
+      tty->print_cr("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize);
+      vm_exit(1);
+    }
     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
   }
 
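The two hunks above reorder the metaspace allocation failure path: a GC retry is attempted first (now skipped when the CDS dump is running inside the VM thread, where no GC can be started), and the dump is aborted only once the allocation has definitively failed. The control flow, reduced to a sketch in which every helper is an illustrative stub rather than a HotSpot function:

    #include <cstdio>
    #include <cstdlib>
    #include <cstddef>

    static void* try_allocate(size_t)   { return NULL; }  // stub
    static void* gc_and_retry(size_t)   { return NULL; }  // stub
    static bool  bootstrapping_done()   { return true; }  // stub

    static void* allocate_or_abort(size_t word_size, bool dumping, bool is_vm_thread) {
      void* result = try_allocate(word_size);
      if (result == NULL) {
        // A GC may be attempted once bootstrapping is done, but not while
        // the CDS dump is executing in the VM thread.
        if (bootstrapping_done() && !(dumping && is_vm_thread)) {
          result = gc_and_retry(word_size);
        }
      }
      if (result == NULL && dumping) {
        // Class loading continues during the dump, so further OOMs are
        // likely and the archive could come out bad; abort instead.
        std::fprintf(stderr, "CDS dump aborted; increase MaxMetaspaceSize\n");
        std::exit(1);
      }
      return result;
    }

    int main() {
      return allocate_or_abort(128, /*dumping=*/false, /*is_vm_thread=*/false) == NULL ? 0 : 1;
    }
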
@@ -4775,7 +4762,7 @@
 }
 
 void Metaspace::purge() {
-  MutexLockerEx cl(SpaceManager::expand_lock(),
+  MutexLockerEx cl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);
   purge(NonClassType);
   if (using_class_space()) {
@@ -4843,7 +4830,7 @@
     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
   }
 
-  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
 
   // Allocate chunk for metadata objects
   initialize_first_chunk(type, Metaspace::NonClassType);
@@ -5050,7 +5037,7 @@
 
   static void test_virtual_space_list_large_chunk() {
     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
-    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+    MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
     // Use a size larger than VirtualSpaceSize (256k) plus one page, so that it is _not_
     // vm_allocation_granularity aligned on Windows.
     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
@@ -5085,7 +5072,7 @@
 
  public:
   static void test() {
-    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+    MutexLockerEx ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
     const size_t vsn_test_size_words = MediumChunk  * 4;
     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
 
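Every metaspace.cpp hunk above makes the same mechanical substitution: the lock guarding virtual-space and chunk expansion moves from a SpaceManager static (expand_lock()) to a global MetaspaceExpand_lock, presumably defined alongside the other VM mutexes, so callers no longer need the SpaceManager class just to take the lock or assert ownership of it. The lock discipline itself is unchanged; a sketch of the pattern with std::mutex standing in for HotSpot's Mutex/MutexLockerEx:

    #include <mutex>
    #include <cstddef>

    static std::mutex MetaspaceExpandLockSketch;  // stand-in for MetaspaceExpand_lock
    static size_t committed_words = 0;

    // Callers must already hold the lock; HotSpot enforces this with
    // assert_lock_strong(MetaspaceExpand_lock).
    static void inc_committed_words_locked(size_t v) {
      committed_words += v;
    }

    static void expand_by(size_t v) {
      // MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
      std::lock_guard<std::mutex> guard(MetaspaceExpandLockSketch);
      inc_committed_words_locked(v);
    }

    int main() {
      expand_by(512);
      return committed_words == 512 ? 0 : 1;
    }
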
--- a/src/hotspot/share/memory/metaspaceChunkFreeListSummary.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/metaspaceChunkFreeListSummary.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_MEMORY_METASPACECHUNKFREELISTSUMMARY_HPP
 #define SHARE_VM_MEMORY_METASPACECHUNKFREELISTSUMMARY_HPP
 
-#include "memory/allocation.hpp"
 
 class MetaspaceChunkFreeListSummary {
   size_t _num_specialized_chunks;
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -35,11 +35,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/systemDictionaryShared.hpp"
 #include "code/codeCache.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1Allocator.inline.hpp"
-#include "gc/g1/g1CollectedHeap.hpp"
-#endif
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "logging/log.hpp"
@@ -49,6 +44,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
@@ -59,6 +55,7 @@
 #include "prims/jvmtiRedefineClasses.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/timerTrace.hpp"
 #include "runtime/vmThread.hpp"
@@ -66,6 +63,10 @@
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/hashtable.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1Allocator.inline.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#endif
 
 ReservedSpace MetaspaceShared::_shared_rs;
 VirtualSpace MetaspaceShared::_shared_vs;
@@ -844,7 +845,7 @@
       assert(MetaspaceShared::is_heap_object_archiving_allowed(),
              "Archiving heap object is not allowed");
       _dump_region->append_intptr_t(
-        (intptr_t)oopDesc::encode_heap_oop_not_null(*o));
+        (intptr_t)CompressedOops::encode_not_null(*o));
     }
   }
 
@@ -1936,7 +1937,7 @@
              "Archived heap object is not allowed");
       assert(MetaspaceShared::open_archive_heap_region_mapped(),
              "Open archive heap region is not mapped");
-      RootAccess<IN_ARCHIVE_ROOT>::oop_store(p, oopDesc::decode_heap_oop_not_null(o));
+      RootAccess<IN_ARCHIVE_ROOT>::oop_store(p, CompressedOops::decode_not_null(o));
     }
   }
 
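Besides the CompressedOops conversion, filemap.cpp and metaspaceShared.cpp adopt the HotSpot include-ordering convention: unconditional includes sorted first, with conditional groups such as #if INCLUDE_ALL_GCS moved to the end of the include list. Schematically, using paths from the hunks above:

    #include "precompiled.hpp"
    #include "logging/log.hpp"                  // ...sorted unconditional includes...
    #include "oops/compressedOops.inline.hpp"
    #include "utilities/hashtable.inline.hpp"
    #if INCLUDE_ALL_GCS                         // conditional includes come last
    #include "gc/g1/g1CollectedHeap.hpp"
    #endif
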
--- a/src/hotspot/share/memory/padded.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/padded.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_MEMORY_PADDED_HPP
 #define SHARE_VM_MEMORY_PADDED_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/align.hpp"
 #include "utilities/globalDefinitions.hpp"
 
--- a/src/hotspot/share/memory/universe.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/universe.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -35,7 +35,8 @@
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcArguments.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcConfig.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/space.hpp"
@@ -602,12 +603,12 @@
   // preallocated errors with backtrace have been consumed. Also need to avoid
   // a potential loop which could happen if an out of memory occurs when attempting
   // to allocate the backtrace.
-  return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
-          (throwable() != Universe::_out_of_memory_error_metaspace)  &&
-          (throwable() != Universe::_out_of_memory_error_class_metaspace)  &&
-          (throwable() != Universe::_out_of_memory_error_array_size) &&
-          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit) &&
-          (throwable() != Universe::_out_of_memory_error_realloc_objects));
+  return ((!oopDesc::equals(throwable(), Universe::_out_of_memory_error_java_heap)) &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_metaspace))  &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_class_metaspace))  &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_array_size)) &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_gc_overhead_limit)) &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_realloc_objects)));
 }
 
 
@@ -745,8 +746,7 @@
 
 CollectedHeap* Universe::create_heap() {
   assert(_collectedHeap == NULL, "Heap already created");
-  assert(GCArguments::is_initialized(), "GC must be initialized here");
-  return GCArguments::arguments()->create_heap();
+  return GCConfig::arguments()->create_heap();
 }
 
 // Choose the heap base address and oop encoding mode
@@ -765,7 +765,6 @@
   }
   log_info(gc)("Using %s", _collectedHeap->name());
 
-  GCArguments::arguments()->post_heap_initialize();
   ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
 
 #ifdef _LP64
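
The hunk above replaces raw oop comparisons with oopDesc::equals, which is backed by the Access API's new equals operation (added later in this changeset), so a GC that can have two live copies of one object (from-space and to-space) still reports them equal. A sketch of why a resolve step must precede the comparison, using a hypothetical forwarding pointer:

    #include <cstddef>
    #include <cassert>

    // Hypothetical object header: a from-space copy points at its to-space copy.
    struct ObjSketch { ObjSketch* forwardee; };

    static ObjSketch* resolve(ObjSketch* p) {
      return (p != NULL && p->forwardee != NULL) ? p->forwardee : p;
    }

    // Stand-in for oopDesc::equals: compare canonical copies, not raw pointers.
    static bool oop_equals_sketch(ObjSketch* a, ObjSketch* b) {
      return resolve(a) == resolve(b);
    }

    int main() {
      ObjSketch to_space   = { NULL };
      ObjSketch from_space = { &to_space };
      assert(&from_space != &to_space);                   // raw compare differs
      assert(oop_equals_sketch(&from_space, &to_space));  // same logical object
      return 0;
    }
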
--- a/src/hotspot/share/memory/virtualspace.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/virtualspace.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "memory/virtualspace.hpp"
 #include "oops/markOop.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/os.inline.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 
--- a/src/hotspot/share/memory/virtualspace.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/memory/virtualspace.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_RUNTIME_VIRTUALSPACE_HPP
 #define SHARE_VM_RUNTIME_VIRTUALSPACE_HPP
 
-#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 // ReservedSpace is a data structure for reserving a contiguous address range.
 
--- a/src/hotspot/share/metaprogramming/integralConstant.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/metaprogramming/integralConstant.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_METAPROGRAMMING_INTEGRALCONSTANT_HPP
 #define SHARE_VM_METAPROGRAMMING_INTEGRALCONSTANT_HPP
 
-#include "memory/allocation.hpp"
 
 // An Integral Constant is a class providing a compile-time value of an
 // integral type.  An Integral Constant is also a nullary metafunction,
--- a/src/hotspot/share/metaprogramming/isIntegral.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/metaprogramming/isIntegral.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #ifndef SHARE_VM_METAPROGRAMMING_ISINTEGRAL_HPP
 #define SHARE_VM_METAPROGRAMMING_ISINTEGRAL_HPP
 
-#include "memory/allocation.hpp"
 #include "metaprogramming/integralConstant.hpp"
 #include "metaprogramming/isSigned.hpp"
 #include "metaprogramming/removeCV.hpp"
--- a/src/hotspot/share/metaprogramming/isRegisteredEnum.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/metaprogramming/isRegisteredEnum.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_METAPROGRAMMING_ISREGISTEREDENUM_HPP
 #define SHARE_VM_METAPROGRAMMING_ISREGISTEREDENUM_HPP
 
-#include "memory/allocation.hpp"
 #include "metaprogramming/integralConstant.hpp"
 
 // Recognize registered enum types.
--- a/src/hotspot/share/metaprogramming/isSigned.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/metaprogramming/isSigned.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_METAPROGRAMMING_ISSIGNED_HPP
 #define SHARE_VM_METAPROGRAMMING_ISSIGNED_HPP
 
-#include "memory/allocation.hpp"
 #include "metaprogramming/integralConstant.hpp"
 #include "metaprogramming/removeCV.hpp"
 #include <limits>
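
The metaprogramming headers above (integralConstant.hpp, isIntegral.hpp, isRegisteredEnum.hpp, isSigned.hpp) all shed their memory/allocation.hpp include: nothing in them allocates, so the include only enlarged the include graph of every client. As an illustration, a signedness trait of this kind needs nothing beyond <limits>:

    #include <limits>

    // Sketch of an IsSigned-style trait; std::numeric_limits suffices.
    template <typename T>
    struct IsSignedSketch {
      static const bool value = std::numeric_limits<T>::is_signed;
    };

    static_assert(IsSignedSketch<int>::value, "int is signed");
    static_assert(!IsSignedSketch<unsigned int>::value, "unsigned int is not");
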
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/access.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/accessDecorators.hpp"
+
+// This macro allows instantiating selected accesses to be usable from the
+// access.hpp file, to break dependencies to the access.inline.hpp file.
+#define INSTANTIATE_HPP_ACCESS(decorators, T, barrier_type)  \
+  template struct RuntimeDispatch<DecoratorFixup<decorators>::value, T, barrier_type>
+
+namespace AccessInternal {
+  INSTANTIATE_HPP_ACCESS(INTERNAL_EMPTY, oop, BARRIER_EQUALS);
+}
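
The new access.cpp exists solely to explicitly instantiate the runtime-dispatch templates that clients of access.hpp alone are allowed to use (here, the oop equals barrier), so those clients never pull in access.inline.hpp and its GC backend headers. The underlying C++ mechanism, reduced to a two-file sketch:

    // dispatch.hpp (sketch): declared, deliberately not defined here.
    template <typename T> T dispatched_load(const T* addr);

    // dispatch.cpp (sketch): definition plus an explicit instantiation, so a
    // translation unit that includes only dispatch.hpp still links for T=int.
    template <typename T> T dispatched_load(const T* addr) { return *addr; }
    template int dispatched_load<int>(const int*);
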
--- a/src/hotspot/share/oops/access.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/access.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -22,16 +22,17 @@
  *
  */
 
-#ifndef SHARE_VM_RUNTIME_ACCESS_HPP
-#define SHARE_VM_RUNTIME_ACCESS_HPP
+#ifndef SHARE_OOPS_ACCESS_HPP
+#define SHARE_OOPS_ACCESS_HPP
 
 #include "memory/allocation.hpp"
-#include "metaprogramming/decay.hpp"
-#include "metaprogramming/integralConstant.hpp"
+#include "oops/accessBackend.hpp"
+#include "oops/accessDecorators.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+
 // = GENERAL =
 // Access is an API for performing accesses with declarative semantics. Each access can have a number of "decorators".
 // A decorator is an attribute or property that affects the way a memory access is performed in some way.
@@ -39,11 +40,12 @@
 // e.g. strength of references, strength of GC barriers, or whether compression should be applied or not.
 // Some decorators are set at buildtime, such as whether primitives require GC barriers or not, others
 // at callsites such as whether an access is in the heap or not, and others are resolved at runtime
-// such as GC-specific barriers and encoding/decoding compressed oops.
+// such as GC-specific barriers and encoding/decoding compressed oops. For more information about what
+// decorators are available, cf. oops/accessDecorators.hpp.
 // By pipelining handling of these decorators, the design of the Access API allows separation of concern
 // over the different orthogonal concerns of decorators, while providing a powerful way of
 // expressing these orthogonal semantic properties in a unified way.
-
+//
 // == OPERATIONS ==
 // * load: Load a value from an address.
 // * load_at: Load a value from an internal pointer relative to a base object.
@@ -56,287 +58,39 @@
 // * arraycopy: Copy data from one heap array to another heap array.
 // * clone: Clone the contents of an object to a newly allocated object.
 // * resolve: Resolve a stable to-space invariant oop that is guaranteed not to relocate its payload until a subsequent thread transition.
-
-typedef uint64_t DecoratorSet;
-
-// == Internal Decorators - do not use ==
-// * INTERNAL_EMPTY: This is the name for the empty decorator set (in absence of other decorators).
-// * INTERNAL_CONVERT_COMPRESSED_OOPS: This is an oop access that will require converting an oop
-//   to a narrowOop or vice versa, if UseCompressedOops is known to be set.
-// * INTERNAL_VALUE_IS_OOP: Remember that the involved access is on oop rather than primitive.
-const DecoratorSet INTERNAL_EMPTY                    = UCONST64(0);
-const DecoratorSet INTERNAL_CONVERT_COMPRESSED_OOP   = UCONST64(1) << 1;
-const DecoratorSet INTERNAL_VALUE_IS_OOP             = UCONST64(1) << 2;
-
-// == Internal build-time Decorators ==
-// * INTERNAL_BT_BARRIER_ON_PRIMITIVES: This is set in the barrierSetConfig.hpp file.
-// * INTERNAL_BT_TO_SPACE_INVARIANT: This is set in the barrierSetConfig.hpp file iff
-//   no GC is bundled in the build that is to-space invariant.
-const DecoratorSet INTERNAL_BT_BARRIER_ON_PRIMITIVES = UCONST64(1) << 3;
-const DecoratorSet INTERNAL_BT_TO_SPACE_INVARIANT    = UCONST64(1) << 4;
-
-// == Internal run-time Decorators ==
-// * INTERNAL_RT_USE_COMPRESSED_OOPS: This decorator will be set in runtime resolved
-//   access backends iff UseCompressedOops is true.
-const DecoratorSet INTERNAL_RT_USE_COMPRESSED_OOPS   = UCONST64(1) << 5;
-
-const DecoratorSet INTERNAL_DECORATOR_MASK           = INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_VALUE_IS_OOP |
-                                                       INTERNAL_BT_BARRIER_ON_PRIMITIVES | INTERNAL_RT_USE_COMPRESSED_OOPS;
-
-// == Memory Ordering Decorators ==
-// The memory ordering decorators can be described in the following way:
-// === Decorator Rules ===
-// The different types of memory ordering guarantees have a strict order of strength.
-// Explicitly specifying the stronger ordering implies that the guarantees of the weaker
-// property holds too. The names come from the C++11 atomic operations, and typically
-// have a JMM equivalent property.
-// The equivalence may be viewed like this:
-// MO_UNORDERED is equivalent to JMM plain.
-// MO_VOLATILE has no equivalence in JMM, because it's a C++ thing.
-// MO_RELAXED is equivalent to JMM opaque.
-// MO_ACQUIRE is equivalent to JMM acquire.
-// MO_RELEASE is equivalent to JMM release.
-// MO_SEQ_CST is equivalent to JMM volatile.
+// * equals: Object equality, e.g. when different copies of the same object are in use (from-space vs. to-space).
 //
-// === Stores ===
-//  * MO_UNORDERED (Default): No guarantees.
-//    - The compiler and hardware are free to reorder aggressively. And they will.
-//  * MO_VOLATILE: Volatile stores (in the C++ sense).
-//    - The stores are not reordered by the compiler (but possibly the HW) w.r.t. other
-//      volatile accesses in program order (but possibly non-volatile accesses).
-//  * MO_RELAXED: Relaxed atomic stores.
-//    - The stores are atomic.
-//    - Guarantees from volatile stores hold.
-//  * MO_RELEASE: Releasing stores.
-//    - The releasing store will make its preceding memory accesses observable to memory accesses
-//      subsequent to an acquiring load observing this releasing store.
-//    - Guarantees from relaxed stores hold.
-//  * MO_SEQ_CST: Sequentially consistent stores.
-//    - The stores are observed in the same order by MO_SEQ_CST loads on other processors
-//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
-//    - Guarantees from releasing stores hold.
-// === Loads ===
-//  * MO_UNORDERED (Default): No guarantees
-//    - The compiler and hardware are free to reorder aggressively. And they will.
-//  * MO_VOLATILE: Volatile loads (in the C++ sense).
-//    - The loads are not reordered by the compiler (but possibly the HW) w.r.t. other
-//      volatile accesses in program order (but possibly non-volatile accesses).
-//  * MO_RELAXED: Relaxed atomic loads.
-//    - The stores are atomic.
-//    - Guarantees from volatile loads hold.
-//  * MO_ACQUIRE: Acquiring loads.
-//    - An acquiring load will make subsequent memory accesses observe the memory accesses
-//      preceding the releasing store that the acquiring load observed.
-//    - Guarantees from relaxed loads hold.
-//  * MO_SEQ_CST: Sequentially consistent loads.
-//    - These loads observe MO_SEQ_CST stores in the same order on other processors
-//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
-//    - Guarantees from acquiring loads hold.
-// === Atomic Cmpxchg ===
-//  * MO_RELAXED: Atomic but relaxed cmpxchg.
-//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold unconditionally.
-//  * MO_SEQ_CST: Sequentially consistent cmpxchg.
-//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold unconditionally.
-// === Atomic Xchg ===
-//  * MO_RELAXED: Atomic but relaxed atomic xchg.
-//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold.
-//  * MO_SEQ_CST: Sequentially consistent xchg.
-//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold.
-const DecoratorSet MO_UNORDERED      = UCONST64(1) << 6;
-const DecoratorSet MO_VOLATILE       = UCONST64(1) << 7;
-const DecoratorSet MO_RELAXED        = UCONST64(1) << 8;
-const DecoratorSet MO_ACQUIRE        = UCONST64(1) << 9;
-const DecoratorSet MO_RELEASE        = UCONST64(1) << 10;
-const DecoratorSet MO_SEQ_CST        = UCONST64(1) << 11;
-const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_VOLATILE | MO_RELAXED |
-                                       MO_ACQUIRE | MO_RELEASE | MO_SEQ_CST;
-
-// === Barrier Strength Decorators ===
-// * AS_RAW: The access will translate into a raw memory access, hence ignoring all semantic concerns
-//   except memory ordering and compressed oops. This will bypass runtime function pointer dispatching
-//   in the pipeline and hardwire to raw accesses without going trough the GC access barriers.
-//  - Accesses on oop* translate to raw memory accesses without runtime checks
-//  - Accesses on narrowOop* translate to encoded/decoded memory accesses without runtime checks
-//  - Accesses on HeapWord* translate to a runtime check choosing one of the above
-//  - Accesses on other types translate to raw memory accesses without runtime checks
-// * AS_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
-//   marking that the previous value is uninitialized nonsense rather than a real value.
-// * AS_NO_KEEPALIVE: The barrier is used only on oop references and will not keep any involved objects
-//   alive, regardless of the type of reference being accessed. It will however perform the memory access
-//   in a consistent way w.r.t. e.g. concurrent compaction, so that the right field is being accessed,
-//   or maintain, e.g. intergenerational or interregional pointers if applicable. This should be used with
-//   extreme caution in isolated scopes.
-// * AS_NORMAL: The accesses will be resolved to an accessor on the BarrierSet class, giving the
-//   responsibility of performing the access and what barriers to be performed to the GC. This is the default.
-//   Note that primitive accesses will only be resolved on the barrier set if the appropriate build-time
-//   decorator for enabling primitive barriers is enabled for the build.
-const DecoratorSet AS_RAW                  = UCONST64(1) << 12;
-const DecoratorSet AS_DEST_NOT_INITIALIZED = UCONST64(1) << 13;
-const DecoratorSet AS_NO_KEEPALIVE         = UCONST64(1) << 14;
-const DecoratorSet AS_NORMAL               = UCONST64(1) << 15;
-const DecoratorSet AS_DECORATOR_MASK       = AS_RAW | AS_DEST_NOT_INITIALIZED |
-                                             AS_NO_KEEPALIVE | AS_NORMAL;
-
-// === Reference Strength Decorators ===
-// These decorators only apply to accesses on oop-like types (oop/narrowOop).
-// * ON_STRONG_OOP_REF: Memory access is performed on a strongly reachable reference.
-// * ON_WEAK_OOP_REF: The memory access is performed on a weakly reachable reference.
-// * ON_PHANTOM_OOP_REF: The memory access is performed on a phantomly reachable reference.
-//   This is the same ring of strength as jweak and weak oops in the VM.
-// * ON_UNKNOWN_OOP_REF: The memory access is performed on a reference of unknown strength.
-//   This could for example come from the unsafe API.
-// * Default (no explicit reference strength specified): ON_STRONG_OOP_REF
-const DecoratorSet ON_STRONG_OOP_REF  = UCONST64(1) << 16;
-const DecoratorSet ON_WEAK_OOP_REF    = UCONST64(1) << 17;
-const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 18;
-const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 19;
-const DecoratorSet ON_DECORATOR_MASK  = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
-                                        ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF;
-
-// === Access Location ===
-// Accesses can take place in, e.g. the heap, old or young generation and different native roots.
-// The location is important to the GC as it may imply different actions. The following decorators are used:
-// * IN_HEAP: The access is performed in the heap. Many barriers such as card marking will
-//   be omitted if this decorator is not set.
-// * IN_HEAP_ARRAY: The access is performed on a heap allocated array. This is sometimes a special case
-//   for some GCs, and implies that it is an IN_HEAP.
-// * IN_ROOT: The access is performed in an off-heap data structure pointing into the Java heap.
-// * IN_CONCURRENT_ROOT: The access is performed in an off-heap data structure pointing into the Java heap,
-//   but is notably not scanned during safepoints. This is sometimes a special case for some GCs and
-//   implies that it is also an IN_ROOT.
-const DecoratorSet IN_HEAP            = UCONST64(1) << 20;
-const DecoratorSet IN_HEAP_ARRAY      = UCONST64(1) << 21;
-const DecoratorSet IN_ROOT            = UCONST64(1) << 22;
-const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 23;
-const DecoratorSet IN_ARCHIVE_ROOT    = UCONST64(1) << 24;
-const DecoratorSet IN_DECORATOR_MASK  = IN_HEAP | IN_HEAP_ARRAY |
-                                        IN_ROOT | IN_CONCURRENT_ROOT |
-                                        IN_ARCHIVE_ROOT;
-
-// == Value Decorators ==
-// * OOP_NOT_NULL: This property can make certain barriers faster such as compressing oops.
-const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 25;
-const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
-
-// == Arraycopy Decorators ==
-// * ARRAYCOPY_CHECKCAST: This property means that the class of the objects in source
-//   are not guaranteed to be subclasses of the class of the destination array. This requires
-//   a check-cast barrier during the copying operation. If this is not set, it is assumed
-//   that the array is covariant: (the source array type is-a destination array type)
-// * ARRAYCOPY_DISJOINT: This property means that it is known that the two array ranges
-//   are disjoint.
-// * ARRAYCOPY_ARRAYOF: The copy is in the arrayof form.
-// * ARRAYCOPY_ATOMIC: The accesses have to be atomic over the size of its elements.
-// * ARRAYCOPY_ALIGNED: The accesses have to be aligned on a HeapWord.
-const DecoratorSet ARRAYCOPY_CHECKCAST            = UCONST64(1) << 26;
-const DecoratorSet ARRAYCOPY_DISJOINT             = UCONST64(1) << 27;
-const DecoratorSet ARRAYCOPY_ARRAYOF              = UCONST64(1) << 28;
-const DecoratorSet ARRAYCOPY_ATOMIC               = UCONST64(1) << 29;
-const DecoratorSet ARRAYCOPY_ALIGNED              = UCONST64(1) << 30;
-const DecoratorSet ARRAYCOPY_DECORATOR_MASK       = ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
-                                                    ARRAYCOPY_DISJOINT | ARRAYCOPY_ARRAYOF |
-                                                    ARRAYCOPY_ATOMIC | ARRAYCOPY_ALIGNED;
-
-// The HasDecorator trait can help at compile-time determining whether a decorator set
-// has an intersection with a certain other decorator set
-template <DecoratorSet decorators, DecoratorSet decorator>
-struct HasDecorator: public IntegralConstant<bool, (decorators & decorator) != 0> {};
-
-namespace AccessInternal {
-  template <typename T>
-  struct OopOrNarrowOopInternal: AllStatic {
-    typedef oop type;
-  };
-
-  template <>
-  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
-    typedef narrowOop type;
-  };
-
-  // This metafunction returns a canonicalized oop/narrowOop type for a passed
-  // in oop-like types passed in from oop_* overloads where the user has sworn
-  // that the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
-  // narrowOoop, instanceOopDesc*, and random other things).
-  // In the oop_* overloads, it must hold that if the passed in type T is not
-  // narrowOop, then it by contract has to be one of many oop-like types implicitly
-  // convertible to oop, and hence returns oop as the canonical oop type.
-  // If it turns out it was not, then the implicit conversion to oop will fail
-  // to compile, as desired.
-  template <typename T>
-  struct OopOrNarrowOop: AllStatic {
-    typedef typename OopOrNarrowOopInternal<typename Decay<T>::type>::type type;
-  };
-
-  inline void* field_addr(oop base, ptrdiff_t byte_offset) {
-    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
-  }
-
-  template <DecoratorSet decorators, typename T>
-  void store_at(oop base, ptrdiff_t offset, T value);
-
-  template <DecoratorSet decorators, typename T>
-  T load_at(oop base, ptrdiff_t offset);
-
-  template <DecoratorSet decorators, typename T>
-  T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);
-
-  template <DecoratorSet decorators, typename T>
-  T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);
-
-  template <DecoratorSet decorators, typename P, typename T>
-  void store(P* addr, T value);
-
-  template <DecoratorSet decorators, typename P, typename T>
-  T load(P* addr);
-
-  template <DecoratorSet decorators, typename P, typename T>
-  T atomic_cmpxchg(T new_value, P* addr, T compare_value);
-
-  template <DecoratorSet decorators, typename P, typename T>
-  T atomic_xchg(T new_value, P* addr);
-
-  template <DecoratorSet decorators, typename T>
-  bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length);
-
-  template <DecoratorSet decorators>
-  void clone(oop src, oop dst, size_t size);
-
-  template <DecoratorSet decorators>
-  oop resolve(oop src);
-
-  // Infer the type that should be returned from a load.
-  template <typename P, DecoratorSet decorators>
-  class LoadProxy: public StackObj {
-  private:
-    P *const _addr;
-  public:
-    LoadProxy(P* addr) : _addr(addr) {}
-
-    template <typename T>
-    inline operator T() {
-      return load<decorators, P, T>(_addr);
-    }
-
-    inline operator P() {
-      return load<decorators, P, P>(_addr);
-    }
-  };
-
-  // Infer the type that should be returned from a load_at.
-  template <DecoratorSet decorators>
-  class LoadAtProxy: public StackObj {
-  private:
-    const oop _base;
-    const ptrdiff_t _offset;
-  public:
-    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}
-
-    template <typename T>
-    inline operator T() const {
-      return load_at<decorators, T>(_base, _offset);
-    }
-  };
-}
+// == IMPLEMENTATION ==
+// Each access goes through the following steps in a template pipeline.
+// There are essentially 5 steps for each access:
+// * Step 1:   Set default decorators and decay types. This step gets rid of CV qualifiers
+//             and sets default decorators to sensible values.
+// * Step 2:   Reduce types. This step makes sure there is only a single T type and not
+//             multiple types. The P type of the address and T type of the value must
+//             match.
+// * Step 3:   Pre-runtime dispatch. This step checks whether a runtime call can be
+//             avoided, and in that case avoids it (calling raw accesses or
+//             primitive accesses in a build that does not require primitive GC barriers)
+// * Step 4:   Runtime-dispatch. This step performs a runtime dispatch to the corresponding
+//             BarrierSet::AccessBarrier accessor that attaches GC-required barriers
+//             to the access.
+// * Step 5.a: Barrier resolution. This step is invoked the first time a runtime-dispatch
+//             happens for an access. The appropriate BarrierSet::AccessBarrier accessor
+//             is resolved, then the function pointer is updated to that accessor for
+//             future invocations.
+// * Step 5.b: Post-runtime dispatch. This step now casts previously unknown types such
+//             as the address type of an oop on the heap (is it oop* or narrowOop*) to
+//             the appropriate type. It also splits sufficiently orthogonal accesses into
+//             different functions, such as whether the access involves oops or primitives
+//             and whether the access is performed on the heap or outside. Then the
+//             appropriate BarrierSet::AccessBarrier is called to perform the access.
+//
+// The implementation of steps 1-4 resides in accessBackend.hpp, to allow selected
+// accesses to be accessible from only access.hpp, as opposed to access.inline.hpp.
+// Steps 5.a and 5.b require knowledge about the GC backends, and therefore need to
+// include the various GC backend .inline.hpp headers. Their implementation resides in
+// access.inline.hpp. The accesses that are allowed through the access.hpp file
+// must be instantiated in access.cpp using the INSTANTIATE_HPP_ACCESS macro.
 
 template <DecoratorSet decorators = INTERNAL_EMPTY>
 class Access: public AllStatic {
@@ -409,9 +163,9 @@
   }
 
   // Oop heap accesses
-  static inline AccessInternal::LoadAtProxy<decorators | INTERNAL_VALUE_IS_OOP> oop_load_at(oop base, ptrdiff_t offset) {
+  static inline AccessInternal::OopLoadAtProxy<decorators> oop_load_at(oop base, ptrdiff_t offset) {
     verify_heap_oop_decorators<load_mo_decorators>();
-    return AccessInternal::LoadAtProxy<decorators | INTERNAL_VALUE_IS_OOP>(base, offset);
+    return AccessInternal::OopLoadAtProxy<decorators>(base, offset);
   }
 
   template <typename T>
@@ -478,9 +232,9 @@
 
   // Oop accesses
   template <typename P>
-  static inline AccessInternal::LoadProxy<P, decorators | INTERNAL_VALUE_IS_OOP> oop_load(P* addr) {
+  static inline AccessInternal::OopLoadProxy<P, decorators> oop_load(P* addr) {
     verify_oop_decorators<load_mo_decorators>();
-    return AccessInternal::LoadProxy<P, decorators | INTERNAL_VALUE_IS_OOP>(addr);
+    return AccessInternal::OopLoadProxy<P, decorators>(addr);
   }
 
   template <typename P, typename T>
@@ -512,6 +266,11 @@
     verify_decorators<INTERNAL_EMPTY>();
     return AccessInternal::resolve<decorators>(obj);
   }
+
+  static bool equals(oop o1, oop o2) {
+    verify_decorators<INTERNAL_EMPTY>();
+    return AccessInternal::equals<decorators>(o1, o2);
+  }
 };
 
 // Helper for performing raw accesses (knows only of memory ordering
@@ -529,4 +288,41 @@
 template <DecoratorSet decorators = INTERNAL_EMPTY>
 class RootAccess: public Access<IN_ROOT | decorators> {};
 
-#endif // SHARE_VM_RUNTIME_ACCESS_HPP
+template <DecoratorSet decorators>
+template <DecoratorSet expected_decorators>
+void Access<decorators>::verify_decorators() {
+  STATIC_ASSERT((~expected_decorators & decorators) == 0); // unexpected decorator used
+  const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
+  STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
+    (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
+    (barrier_strength_decorators ^ AS_DEST_NOT_INITIALIZED) == 0 ||
+    (barrier_strength_decorators ^ AS_RAW) == 0 ||
+    (barrier_strength_decorators ^ AS_NORMAL) == 0
+  ));
+  const DecoratorSet ref_strength_decorators = decorators & ON_DECORATOR_MASK;
+  STATIC_ASSERT(ref_strength_decorators == 0 || ( // make sure ref strength decorators are disjoint if set
+    (ref_strength_decorators ^ ON_STRONG_OOP_REF) == 0 ||
+    (ref_strength_decorators ^ ON_WEAK_OOP_REF) == 0 ||
+    (ref_strength_decorators ^ ON_PHANTOM_OOP_REF) == 0 ||
+    (ref_strength_decorators ^ ON_UNKNOWN_OOP_REF) == 0
+  ));
+  const DecoratorSet memory_ordering_decorators = decorators & MO_DECORATOR_MASK;
+  STATIC_ASSERT(memory_ordering_decorators == 0 || ( // make sure memory ordering decorators are disjoint if set
+    (memory_ordering_decorators ^ MO_UNORDERED) == 0 ||
+    (memory_ordering_decorators ^ MO_VOLATILE) == 0 ||
+    (memory_ordering_decorators ^ MO_RELAXED) == 0 ||
+    (memory_ordering_decorators ^ MO_ACQUIRE) == 0 ||
+    (memory_ordering_decorators ^ MO_RELEASE) == 0 ||
+    (memory_ordering_decorators ^ MO_SEQ_CST) == 0
+  ));
+  const DecoratorSet location_decorators = decorators & IN_DECORATOR_MASK;
+  STATIC_ASSERT(location_decorators == 0 || ( // make sure location decorators are disjoint if set
+    (location_decorators ^ IN_ROOT) == 0 ||
+    (location_decorators ^ IN_HEAP) == 0 ||
+    (location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
+    (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0 ||
+    (location_decorators ^ (IN_ROOT | IN_ARCHIVE_ROOT)) == 0
+  ));
+}
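
The (x ^ FLAG) == 0 idiom in these asserts is an exact-equality test on the masked bits: the XOR is zero only when exactly that decorator (or exact combination) is set. A standalone model, with hypothetical constants:

    #include <cassert>
    #include <cstdint>

    typedef uint64_t DecoratorSet;
    const DecoratorSet MO_RELAXED        = 1 << 0;
    const DecoratorSet MO_ACQUIRE        = 1 << 1;
    const DecoratorSet MO_DECORATOR_MASK = MO_RELAXED | MO_ACQUIRE;

    int main() {
      DecoratorSet decorators = MO_ACQUIRE;
      DecoratorSet mo = decorators & MO_DECORATOR_MASK;
      // Accepts an empty group or exactly one memory ordering decorator,
      // and rejects MO_RELAXED | MO_ACQUIRE being set together:
      assert(mo == 0 || (mo ^ MO_RELAXED) == 0 || (mo ^ MO_ACQUIRE) == 0);
      return 0;
    }
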
+
+#endif // SHARE_OOPS_ACCESS_HPP
--- a/src/hotspot/share/oops/access.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/access.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -22,43 +22,28 @@
  *
  */
 
-#ifndef SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
-#define SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
+#ifndef SHARE_OOPS_ACCESS_INLINE_HPP
+#define SHARE_OOPS_ACCESS_INLINE_HPP
 
 #include "gc/shared/barrierSetConfig.inline.hpp"
-#include "metaprogramming/conditional.hpp"
-#include "metaprogramming/isFloatingPoint.hpp"
-#include "metaprogramming/isIntegral.hpp"
-#include "metaprogramming/isPointer.hpp"
-#include "metaprogramming/isVolatile.hpp"
 #include "oops/access.hpp"
 #include "oops/accessBackend.inline.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
 
-// This file outlines the template pipeline of accesses going through the Access
-// API. There are essentially 5 steps for each access.
-// * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
-//           and sets default decorators to sensible values.
-// * Step 2: Reduce types. This step makes sure there is only a single T type and not
-//           multiple types. The P type of the address and T type of the value must
-//           match.
-// * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
-//           avoided, and in that case avoids it (calling raw accesses or
-//           primitive accesses in a build that does not require primitive GC barriers)
-// * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
-//           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
-//           to the access.
-// * Step 5: Post-runtime dispatch. This step now casts previously unknown types such
-//           as the address type of an oop on the heap (is it oop* or narrowOop*) to
-//           the appropriate type. It also splits sufficiently orthogonal accesses into
-//           different functions, such as whether the access involves oops or primitives
-//           and whether the access is performed on the heap or outside. Then the
-//           appropriate BarrierSet::AccessBarrier is called to perform the access.
+// This file outlines the last 2 steps of the template pipeline of accesses going through
+// the Access API.
+// * Step 5.a: Barrier resolution. This step is invoked the first time runtime dispatch
+//             happens for an access. The appropriate BarrierSet::AccessBarrier accessor
+//             is resolved, then the function pointer is updated to that accessor for
+//             future invocations.
+// * Step 5.b: Post-runtime dispatch. This step now casts previously unknown types such
+//             as the address type of an oop on the heap (is it oop* or narrowOop*) to
+//             the appropriate type. It also splits sufficiently orthogonal accesses into
+//             different functions, such as whether the access involves oops or primitives
+//             and whether the access is performed on the heap or outside. Then the
+//             appropriate BarrierSet::AccessBarrier is called to perform the access.
 
 namespace AccessInternal {
-
-  // Step 5: Post-runtime dispatch.
+  // Step 5.b: Post-runtime dispatch.
   // This class is the last step before calling the BarrierSet::AccessBarrier.
   // Here we make sure to figure out types that were not known prior to the
   // runtime dispatch, such as whether an oop on the heap is oop or narrowOop.
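
A standalone model of this late type recovery (types and names hypothetical): the runtime dispatch erased the address to void*, and once the UseCompressedOops choice is encoded as a template parameter, the real pointer type can be cast back and dereferenced:

    #include <cstdint>
    #include <iostream>

    typedef std::uint32_t narrowOop; // model of a compressed heap reference

    template <bool use_compressed>
    std::uint64_t load_model(void* addr) {
      if (use_compressed) {
        return *reinterpret_cast<narrowOop*>(addr);     // narrowOop* case
      } else {
        return *reinterpret_cast<std::uint64_t*>(addr); // uncompressed case
      }
    }

    int main() {
      narrowOop n = 7;
      std::uint64_t w = 9;
      std::cout << load_model<true>(&n) + load_model<false>(&w) << std::endl; // 16
      return 0;
    }
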
@@ -214,6 +199,13 @@
     }
   };
 
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_EQUALS, decorators>: public AllStatic {
+    static bool access_barrier(oop o1, oop o2) {
+      return GCBarrierType::equals(o1, o2);
+    }
+  };
+
   // Resolving accessors with barriers from the barrier set happens in two steps.
   // 1. Expand paths with runtime-decorators, e.g. is UseCompressedOops on or off.
   // 2. Expand paths for each BarrierSet available in the system.
@@ -279,7 +271,7 @@
     }
   };
 
-  // Step 4: Runtime dispatch
+  // Step 5.a: Barrier resolution
   // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
   // accessor. This is required when the access either depends on whether compressed oops
   // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
@@ -288,888 +280,89 @@
   // it resolves which accessor to be used in future invocations and patches the
   // function pointer to this new accessor.
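
The mechanism is a self-initializing function pointer. A standalone model (all names hypothetical) of the resolve-patch-call sequence:

    #include <iostream>

    typedef int (*load_func_t)(int*);

    static int resolved_load(int* addr) { return *addr; } // the "real" accessor

    static int load_init(int* addr);                      // the resolver, defined below
    static load_func_t _load_func = &load_init;           // initially points at the resolver

    static int load_init(int* addr) {
      // First call only: resolve the accessor (BarrierResolver's job in HotSpot),
      // patch the pointer, then perform this first access through the result.
      _load_func = &resolved_load;
      return resolved_load(addr);
    }

    int main() {
      int x = 5;
      std::cout << _load_func(&x) << std::endl; // resolves and patches, prints 5
      std::cout << _load_func(&x) << std::endl; // calls resolved_load directly
      return 0;
    }
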
 
-  template <DecoratorSet decorators, typename T, BarrierType type>
-  struct RuntimeDispatch: AllStatic {};
-
   template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
-    static func_t _store_func;
-
-    static void store_init(void* addr, T value) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
-      _store_func = function;
-      function(addr, value);
-    }
-
-    static inline void store(void* addr, T value) {
-      _store_func(addr, value);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
-    static func_t _store_at_func;
-
-    static void store_at_init(oop base, ptrdiff_t offset, T value) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
-      _store_at_func = function;
-      function(base, offset, value);
-    }
-
-    static inline void store_at(oop base, ptrdiff_t offset, T value) {
-      _store_at_func(base, offset, value);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
-    static func_t _load_func;
-
-    static T load_init(void* addr) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
-      _load_func = function;
-      return function(addr);
-    }
-
-    static inline T load(void* addr) {
-      return _load_func(addr);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
-    static func_t _load_at_func;
-
-    static T load_at_init(oop base, ptrdiff_t offset) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
-      _load_at_func = function;
-      return function(base, offset);
-    }
-
-    static inline T load_at(oop base, ptrdiff_t offset) {
-      return _load_at_func(base, offset);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
-    static func_t _atomic_cmpxchg_func;
-
-    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
-      _atomic_cmpxchg_func = function;
-      return function(new_value, addr, compare_value);
-    }
-
-    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
-      return _atomic_cmpxchg_func(new_value, addr, compare_value);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
-    static func_t _atomic_cmpxchg_at_func;
-
-    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
-      _atomic_cmpxchg_at_func = function;
-      return function(new_value, base, offset, compare_value);
-    }
-
-    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
-    }
-  };
+  void RuntimeDispatch<decorators, T, BARRIER_STORE>::store_init(void* addr, T value) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
+    _store_func = function;
+    function(addr, value);
+  }
 
   template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
-    static func_t _atomic_xchg_func;
-
-    static T atomic_xchg_init(T new_value, void* addr) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
-      _atomic_xchg_func = function;
-      return function(new_value, addr);
-    }
-
-    static inline T atomic_xchg(T new_value, void* addr) {
-      return _atomic_xchg_func(new_value, addr);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
-    static func_t _atomic_xchg_at_func;
-
-    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
-      _atomic_xchg_at_func = function;
-      return function(new_value, base, offset);
-    }
-
-    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-      return _atomic_xchg_at_func(new_value, base, offset);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
-    static func_t _arraycopy_func;
-
-    static bool arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
-      _arraycopy_func = function;
-      return function(src_obj, dst_obj, src, dst, length);
-    }
-
-    static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
-      return _arraycopy_func(src_obj, dst_obj, src, dst, length);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
-    static func_t _clone_func;
-
-    static void clone_init(oop src, oop dst, size_t size) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
-      _clone_func = function;
-      function(src, dst, size);
-    }
-
-    static inline void clone(oop src, oop dst, size_t size) {
-      _clone_func(src, dst, size);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
-    static func_t _resolve_func;
-
-    static oop resolve_init(oop obj) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_RESOLVE>::resolve_barrier();
-      _resolve_func = function;
-      return function(obj);
-    }
-
-    static inline oop resolve(oop obj) {
-      return _resolve_func(obj);
-    }
-  };
-
-  // Initialize the function pointers to point to the resolving function.
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_STORE>::type
-  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
-  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
-  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
-  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
-  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
-  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
-  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
-  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
-  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
-  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
+  void RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at_init(oop base, ptrdiff_t offset, T value) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
+    _store_at_func = function;
+    function(base, offset, value);
+  }
 
   template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type
-  RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init;
-
-  // Step 3: Pre-runtime dispatching.
-  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
-  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
-  // dispatch point. Otherwise it goes through a runtime check if hardwiring was
-  // not possible.
-  struct PreRuntimeDispatch: AllStatic {
-    template<DecoratorSet decorators>
-    struct CanHardwireRaw: public IntegralConstant<
-      bool,
-      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
-      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
-      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
-    {};
-
-    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
-
-    template<DecoratorSet decorators>
-    static bool is_hardwired_primitive() {
-      return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value &&
-             !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
-    store(void* addr, T value) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        Raw::oop_store(addr, value);
-      } else {
-        Raw::store(addr, value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
-    store(void* addr, T value) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value>::type
-    store(void* addr, T value) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
-      } else {
-        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value>::type
-    store_at(oop base, ptrdiff_t offset, T value) {
-      store<decorators>(field_addr(base, offset), value);
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value>::type
-    store_at(oop base, ptrdiff_t offset, T value) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
-      } else {
-        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
-    load(void* addr) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        return Raw::template oop_load<T>(addr);
-      } else {
-        return Raw::template load<T>(addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
-    load(void* addr) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    load(void* addr) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value, T>::type
-    load_at(oop base, ptrdiff_t offset) {
-      return load<decorators, T>(field_addr(base, offset));
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    load_at(oop base, ptrdiff_t offset) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
-    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
-      } else {
-        return Raw::atomic_cmpxchg(new_value, addr, compare_value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
-    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
-    atomic_xchg(T new_value, void* addr) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        return Raw::oop_atomic_xchg(new_value, addr);
-      } else {
-        return Raw::atomic_xchg(new_value, addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
-    atomic_xchg(T new_value, void* addr) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_xchg(T new_value, void* addr) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-      return atomic_xchg<decorators>(new_value, field_addr(base, offset));
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, base, offset);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
-    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
-      } else {
-        return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
-    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, bool>::type
-    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, dst_obj, src, dst, length);
-      }
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value>::type
-    clone(oop src, oop dst, size_t size) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      Raw::clone(src, dst, size);
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value>::type
-    clone(oop src, oop dst, size_t size) {
-      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
-    resolve(oop obj) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      return Raw::resolve(obj);
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
-    resolve(oop obj) {
-      return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj);
-    }
-  };
-
-  // This class adds implied decorators that follow according to decorator rules.
-  // For example adding default reference strength and default memory ordering
-  // semantics.
-  template <DecoratorSet input_decorators>
-  struct DecoratorFixup: AllStatic {
-    // If no reference strength has been picked, then strong will be picked
-    static const DecoratorSet ref_strength_default = input_decorators |
-      (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
-       ON_STRONG_OOP_REF : INTERNAL_EMPTY);
-    // If no memory ordering has been picked, unordered will be picked
-    static const DecoratorSet memory_ordering_default = ref_strength_default |
-      ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
-    // If no barrier strength has been picked, normal will be used
-    static const DecoratorSet barrier_strength_default = memory_ordering_default |
-      ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
-    // Heap array accesses imply it is a heap access
-    static const DecoratorSet heap_array_is_in_heap = barrier_strength_default |
-      ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
-    static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
-      ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
-    static const DecoratorSet archive_root_is_root = conc_root_is_root |
-      ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
-    static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
-  };
-
-  // Step 2: Reduce types.
-  // Enforce that for non-oop types, T and P have to be strictly the same.
-  // P is the type of the address and T is the type of the values.
-  // As for oop types, it is allow to send T in {narrowOop, oop} and
-  // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
-  // the subsequent table. (columns are P, rows are T)
-  // |           | HeapWord  |   oop   | narrowOop |
-  // |   oop     |  rt-comp  | hw-none |  hw-comp  |
-  // | narrowOop |     x     |    x    |  hw-none  |
-  //
-  // x means not allowed
-  // rt-comp means it must be checked at runtime whether the oop is compressed.
-  // hw-none means it is statically known the oop will not be compressed.
-  // hw-comp means it is statically known the oop will be compressed.
+  T RuntimeDispatch<decorators, T, BARRIER_LOAD>::load_init(void* addr) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
+    _load_func = function;
+    return function(addr);
+  }
 
   template <DecoratorSet decorators, typename T>
-  inline void store_reduce_types(T* addr, T value) {
-    PreRuntimeDispatch::store<decorators>(addr, value);
-  }
-
-  template <DecoratorSet decorators>
-  inline void store_reduce_types(narrowOop* addr, oop value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  T RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at_init(oop base, ptrdiff_t offset) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
+    _load_at_func = function;
+    return function(base, offset);
   }
 
-  template <DecoratorSet decorators>
-  inline void store_reduce_types(narrowOop* addr, narrowOop value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  template <DecoratorSet decorators, typename T>
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
+    _atomic_cmpxchg_func = function;
+    return function(new_value, addr, compare_value);
   }
 
-  template <DecoratorSet decorators>
-  inline void store_reduce_types(HeapWord* addr, oop value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  template <DecoratorSet decorators, typename T>
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
+    _atomic_cmpxchg_at_func = function;
+    return function(new_value, base, offset, compare_value);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
-    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-  }
-
-  template <DecoratorSet decorators>
-  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop atomic_cmpxchg_reduce_types(oop new_value,
-                                         HeapWord* addr,
-                                         oop compare_value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
-    const DecoratorSet expanded_decorators = decorators;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-  }
-
-  template <DecoratorSet decorators>
-  inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline T load_reduce_types(T* addr) {
-    return PreRuntimeDispatch::load<decorators, T>(addr);
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg_init(T new_value, void* addr) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
+    _atomic_xchg_func = function;
+    return function(new_value, addr);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline oop load_reduce_types(HeapWord* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
+    _atomic_xchg_at_func = function;
+    return function(new_value, base, offset);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-    return PreRuntimeDispatch::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
-  }
-
-  template <DecoratorSet decorators>
-  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-  }
-
-  template <DecoratorSet decorators>
-  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, narrowOop* src, narrowOop* dst, size_t length) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-  }
-
-  // Step 1: Set default decorators. This step remembers if a type was volatile
-  // and then sets the MO_VOLATILE decorator by default. Otherwise, a default
-  // memory ordering is set for the access, and the implied decorator rules
-  // are applied to select sensible defaults for decorators that have not been
-  // explicitly set. For example, default object referent strength is set to strong.
-  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
-  // and references from the types). This step also perform some type verification
-  // that the passed in types make sense.
-
-  template <DecoratorSet decorators, typename T>
-  static void verify_types(){
-    // If this fails to compile, then you have sent in something that is
-    // not recognized as a valid primitive type to a primitive Access function.
-    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
-                   (IsPointer<T>::value || IsIntegral<T>::value) ||
-                    IsFloatingPoint<T>::value)); // not allowed primitive type
-  }
-
-  template <DecoratorSet decorators, typename P, typename T>
-  inline void store(P* addr, T value) {
-    verify_types<decorators, T>();
-    typedef typename Decay<P>::type DecayedP;
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT decayed_value = value;
-    // If a volatile address is passed in but no memory ordering decorator,
-    // set the memory ordering to MO_VOLATILE by default.
-    const DecoratorSet expanded_decorators = DecoratorFixup<
-      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
-      (MO_VOLATILE | decorators) : decorators>::value;
-    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
+  bool RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
+    _arraycopy_func = function;
+    return function(src_obj, dst_obj, src, dst, length);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline void store_at(oop base, ptrdiff_t offset, T value) {
-    verify_types<decorators, T>();
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT decayed_value = value;
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
-                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
-                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
-    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
-  }
-
-  template <DecoratorSet decorators, typename P, typename T>
-  inline T load(P* addr) {
-    verify_types<decorators, T>();
-    typedef typename Decay<P>::type DecayedP;
-    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
-                                 typename OopOrNarrowOop<T>::type,
-                                 typename Decay<T>::type>::type DecayedT;
-    // If a volatile address is passed in but no memory ordering decorator,
-    // set the memory ordering to MO_VOLATILE by default.
-    const DecoratorSet expanded_decorators = DecoratorFixup<
-      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
-      (MO_VOLATILE | decorators) : decorators>::value;
-    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline T load_at(oop base, ptrdiff_t offset) {
-    verify_types<decorators, T>();
-    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
-                                 typename OopOrNarrowOop<T>::type,
-                                 typename Decay<T>::type>::type DecayedT;
-    // Expand the decorators (figure out sensible defaults)
-    // Potentially remember if we need compressed oop awareness
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
-                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
-                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
-    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
-  }
-
-  template <DecoratorSet decorators, typename P, typename T>
-  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
-    verify_types<decorators, T>();
-    typedef typename Decay<P>::type DecayedP;
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT new_decayed_value = new_value;
-    DecayedT compare_decayed_value = compare_value;
-    const DecoratorSet expanded_decorators = DecoratorFixup<
-      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
-      (MO_SEQ_CST | decorators) : decorators>::value;
-    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
-                                                            const_cast<DecayedP*>(addr),
-                                                            compare_decayed_value);
+  void RuntimeDispatch<decorators, T, BARRIER_CLONE>::clone_init(oop src, oop dst, size_t size) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
+    _clone_func = function;
+    function(src, dst, size);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-    verify_types<decorators, T>();
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT new_decayed_value = new_value;
-    DecayedT compare_decayed_value = compare_value;
-    // Determine default memory ordering
-    const DecoratorSet expanded_decorators = DecoratorFixup<
-      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
-      (MO_SEQ_CST | decorators) : decorators>::value;
-    // Potentially remember that we need compressed oop awareness
-    const DecoratorSet final_decorators = expanded_decorators |
-                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
-                                           INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY);
-    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
-                                                                   offset, compare_decayed_value);
-  }
-
-  template <DecoratorSet decorators, typename P, typename T>
-  inline T atomic_xchg(T new_value, P* addr) {
-    verify_types<decorators, T>();
-    typedef typename Decay<P>::type DecayedP;
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT new_decayed_value = new_value;
-    // atomic_xchg is only available in SEQ_CST flavour.
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
-    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
-                                                         const_cast<DecayedP*>(addr));
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-    verify_types<decorators, T>();
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT new_decayed_value = new_value;
-    // atomic_xchg is only available in SEQ_CST flavour.
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
-                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
-                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
-    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
+  oop RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::resolve_init(oop obj) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_RESOLVE>::resolve_barrier();
+    _resolve_func = function;
+    return function(obj);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
-                   (IsSame<T, void>::value || IsIntegral<T>::value) ||
-                    IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
-    typedef typename Decay<T>::type DecayedT;
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP>::value;
-    return arraycopy_reduce_types<expanded_decorators>(src_obj, dst_obj,
-                                                       const_cast<DecayedT*>(src),
-                                                       const_cast<DecayedT*>(dst),
-                                                       length);
-  }
-
-  template <DecoratorSet decorators>
-  inline void clone(oop src, oop dst, size_t size) {
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
-    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop resolve(oop obj) {
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
-    return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
+  bool RuntimeDispatch<decorators, T, BARRIER_EQUALS>::equals_init(oop o1, oop o2) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_EQUALS>::resolve_barrier();
+    _equals_func = function;
+    return function(o1, o2);
   }
 }
 
-template <DecoratorSet decorators>
-template <DecoratorSet expected_decorators>
-void Access<decorators>::verify_decorators() {
-  STATIC_ASSERT((~expected_decorators & decorators) == 0); // unexpected decorator used
-  const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
-  STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
-    (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
-    (barrier_strength_decorators ^ AS_DEST_NOT_INITIALIZED) == 0 ||
-    (barrier_strength_decorators ^ AS_RAW) == 0 ||
-    (barrier_strength_decorators ^ AS_NORMAL) == 0
-  ));
-  const DecoratorSet ref_strength_decorators = decorators & ON_DECORATOR_MASK;
-  STATIC_ASSERT(ref_strength_decorators == 0 || ( // make sure ref strength decorators are disjoint if set
-    (ref_strength_decorators ^ ON_STRONG_OOP_REF) == 0 ||
-    (ref_strength_decorators ^ ON_WEAK_OOP_REF) == 0 ||
-    (ref_strength_decorators ^ ON_PHANTOM_OOP_REF) == 0 ||
-    (ref_strength_decorators ^ ON_UNKNOWN_OOP_REF) == 0
-  ));
-  const DecoratorSet memory_ordering_decorators = decorators & MO_DECORATOR_MASK;
-  STATIC_ASSERT(memory_ordering_decorators == 0 || ( // make sure memory ordering decorators are disjoint if set
-    (memory_ordering_decorators ^ MO_UNORDERED) == 0 ||
-    (memory_ordering_decorators ^ MO_VOLATILE) == 0 ||
-    (memory_ordering_decorators ^ MO_RELAXED) == 0 ||
-    (memory_ordering_decorators ^ MO_ACQUIRE) == 0 ||
-    (memory_ordering_decorators ^ MO_RELEASE) == 0 ||
-    (memory_ordering_decorators ^ MO_SEQ_CST) == 0
-  ));
-  const DecoratorSet location_decorators = decorators & IN_DECORATOR_MASK;
-  STATIC_ASSERT(location_decorators == 0 || ( // make sure location decorators are disjoint if set
-    (location_decorators ^ IN_ROOT) == 0 ||
-    (location_decorators ^ IN_HEAP) == 0 ||
-    (location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
-    (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0 ||
-    (location_decorators ^ (IN_ROOT | IN_ARCHIVE_ROOT)) == 0
-  ));
-}
-
-#endif // SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
+#endif // SHARE_OOPS_ACCESS_INLINE_HPP
--- a/src/hotspot/share/oops/accessBackend.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/accessBackend.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -22,16 +22,26 @@
  *
  */
 
-#ifndef SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
-#define SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
+#ifndef SHARE_OOPS_ACCESSBACKEND_HPP
+#define SHARE_OOPS_ACCESSBACKEND_HPP
 
+#include "gc/shared/barrierSetConfig.hpp"
+#include "memory/allocation.hpp"
 #include "metaprogramming/conditional.hpp"
+#include "metaprogramming/decay.hpp"
 #include "metaprogramming/enableIf.hpp"
 #include "metaprogramming/integralConstant.hpp"
+#include "metaprogramming/isFloatingPoint.hpp"
+#include "metaprogramming/isIntegral.hpp"
+#include "metaprogramming/isPointer.hpp"
 #include "metaprogramming/isSame.hpp"
+#include "metaprogramming/isVolatile.hpp"
+#include "oops/accessDecorators.hpp"
+#include "oops/oopsHierarchy.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+
 // This metafunction returns either oop or narrowOop depending on whether
 // an access needs to use compressed oops or not.
 template <DecoratorSet decorators>
@@ -53,7 +63,8 @@
     BARRIER_ATOMIC_XCHG_AT,
     BARRIER_ARRAYCOPY,
     BARRIER_CLONE,
-    BARRIER_RESOLVE
+    BARRIER_RESOLVE,
+    BARRIER_EQUALS
   };
 
   template <DecoratorSet decorators, typename T>
@@ -102,6 +113,7 @@
     typedef bool (*arraycopy_func_t)(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
     typedef void (*clone_func_t)(oop src, oop dst, size_t size);
     typedef oop (*resolve_func_t)(oop obj);
+    typedef bool (*equals_func_t)(oop o1, oop o2);
   };
 
   template <DecoratorSet decorators>
@@ -127,6 +139,7 @@
   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_RESOLVE, resolve_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_EQUALS, equals_func_t);
 #undef ACCESS_GENERATE_ACCESS_FUNCTION
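
The ACCESS_GENERATE_ACCESS_FUNCTION pattern stamps out one trait specialization per barrier kind, mapping the enum value to its function pointer type. A compilable miniature (names hypothetical, and the decorator/T parameters omitted):

    typedef int  (*load_fn_t)(void*);
    typedef bool (*equals_fn_t)(void*, void*);

    enum BarrierKindModel { KIND_LOAD, KIND_EQUALS };

    template <BarrierKindModel kind> struct AccessFnModel {};
    #define GEN_ACCESS_FN(kind, fn_t) \
      template <> struct AccessFnModel<kind> { typedef fn_t type; };
    GEN_ACCESS_FN(KIND_LOAD, load_fn_t)
    GEN_ACCESS_FN(KIND_EQUALS, equals_fn_t)
    #undef GEN_ACCESS_FN

    int main() {
      AccessFnModel<KIND_EQUALS>::type f = 0; // resolves to bool (*)(void*, void*)
      return f == 0 ? 0 : 1;
    }
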
 
   template <DecoratorSet decorators, typename T, BarrierType barrier_type>
@@ -388,6 +401,974 @@
   static void clone(oop src, oop dst, size_t size);
 
   static oop resolve(oop obj) { return obj; }
+
+  static bool equals(oop o1, oop o2) { return o1 == o2; }
 };
 
-#endif // SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
+// Below is the implementation of the first 4 steps of the template pipeline:
+// * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
+//           and sets default decorators to sensible values.
+// * Step 2: Reduce types. This step makes sure there is only a single T type and not
+//           multiple types. The P type of the address and T type of the value must
+//           match.
+// * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
+//           avoided, and in that case avoids it (calling raw accesses or
+//           primitive accesses in a build that does not require primitive GC barriers)
+// * Step 4: Runtime dispatch. This step performs a runtime dispatch to the corresponding
+//           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
+//           to the access.
+
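
Step 1's decorator defaulting can be modeled at compile time (constants hypothetical, mirroring what DecoratorFixup does; the model uses C++11 static_assert where HotSpot uses its STATIC_ASSERT macro). A group that was left empty gets its default OR-ed in; an explicit choice is kept:

    #include <cstdint>

    typedef uint64_t DecoratorSet;
    const DecoratorSet MO_UNORDERED      = 1 << 0;
    const DecoratorSet MO_SEQ_CST        = 1 << 1;
    const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_SEQ_CST;

    template <DecoratorSet input>
    struct FixupModel {
      static const DecoratorSet value =
          input | ((input & MO_DECORATOR_MASK) == 0 ? MO_UNORDERED : 0);
    };

    static_assert(FixupModel<0>::value == MO_UNORDERED, "default filled in");
    static_assert(FixupModel<MO_SEQ_CST>::value == MO_SEQ_CST, "explicit choice kept");

    int main() { return 0; }
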
+namespace AccessInternal {
+  template <typename T>
+  struct OopOrNarrowOopInternal: AllStatic {
+    typedef oop type;
+  };
+
+  template <>
+  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
+    typedef narrowOop type;
+  };
+
+  // This metafunction returns a canonicalized oop/narrowOop type for oop-like
+  // types passed in from the oop_* overloads, where the user has sworn that
+  // the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
+  // narrowOop, instanceOopDesc*, and various other things).
+  // In the oop_* overloads, it must hold that if the passed in type T is not
+  // narrowOop, then it by contract has to be one of many oop-like types implicitly
+  // convertible to oop, and hence returns oop as the canonical oop type.
+  // If it turns out it was not, then the implicit conversion to oop will fail
+  // to compile, as desired.
+  template <typename T>
+  struct OopOrNarrowOop: AllStatic {
+    typedef typename OopOrNarrowOopInternal<typename Decay<T>::type>::type type;
+  };
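
A standalone model of this canonicalization (the Decay step is omitted; the types here are stand-ins): every oop-like type other than narrowOop collapses to the canonical oop:

    #include <type_traits>

    struct oop {};               // stand-in for the canonical heap reference
    typedef unsigned narrowOop;  // stand-in for the compressed reference
    struct instanceOopModel {};  // stand-in for "some other oop-like type"

    template <typename T> struct OopOrNarrowOopModel { typedef oop type; };
    template <> struct OopOrNarrowOopModel<narrowOop> { typedef narrowOop type; };

    static_assert(std::is_same<OopOrNarrowOopModel<instanceOopModel>::type, oop>::value,
                  "oop-like types canonicalize to oop");
    static_assert(std::is_same<OopOrNarrowOopModel<narrowOop>::type, narrowOop>::value,
                  "narrowOop is kept as-is");

    int main() { return 0; }
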
+
+  inline void* field_addr(oop base, ptrdiff_t byte_offset) {
+    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
+  }
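
field_addr is plain byte arithmetic from the object base; a standalone model with a hypothetical layout:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static void* field_addr_model(void* base, std::ptrdiff_t byte_offset) {
      // Same computation as field_addr: base address plus a byte offset.
      return reinterpret_cast<void*>(reinterpret_cast<std::intptr_t>(base) + byte_offset);
    }

    struct HolderModel { std::int64_t header; std::int32_t payload; };

    int main() {
      HolderModel h = { 0, 42 };
      void* p = field_addr_model(&h, offsetof(HolderModel, payload));
      assert(*static_cast<std::int32_t*>(p) == 42);
      return 0;
    }
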
+
+  // Step 4: Runtime dispatch
+  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
+  // accessor. This is required when the access either depends on whether compressed oops
+  // are being used, or on which GC implementation was chosen (e.g. one requiring GC
+  // barriers). The way it works is that a function pointer, initially pointing to an
+  // accessor resolution function, gets called on each access. Upon the first invocation,
+  // it resolves which accessor is to be used in future invocations and patches the
+  // function pointer to point to this accessor.
+
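+  // For illustration only, the self-patching pattern boils down to something like
+  // the following (resolve_accessor() is a hypothetical stand-in for the actual
+  // barrier resolution logic):
+  //
+  //   static func_t _func = &resolve_and_patch;
+  //   static T resolve_and_patch(void* addr) {
+  //     _func = resolve_accessor(); // resolve once...
+  //     return _func(addr);         // ...then call through the patched pointer
+  //   }
+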
+  template <DecoratorSet decorators, typename T, BarrierType type>
+  struct RuntimeDispatch: AllStatic {};
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
+    static func_t _store_func;
+
+    static void store_init(void* addr, T value);
+
+    static inline void store(void* addr, T value) {
+      _store_func(addr, value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
+    static func_t _store_at_func;
+
+    static void store_at_init(oop base, ptrdiff_t offset, T value);
+
+    static inline void store_at(oop base, ptrdiff_t offset, T value) {
+      _store_at_func(base, offset, value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
+    static func_t _load_func;
+
+    static T load_init(void* addr);
+
+    static inline T load(void* addr) {
+      return _load_func(addr);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
+    static func_t _load_at_func;
+
+    static T load_at_init(oop base, ptrdiff_t offset);
+
+    static inline T load_at(oop base, ptrdiff_t offset) {
+      return _load_at_func(base, offset);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
+    static func_t _atomic_cmpxchg_func;
+
+    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value);
+
+    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      return _atomic_cmpxchg_func(new_value, addr, compare_value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
+    static func_t _atomic_cmpxchg_at_func;
+
+    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value);
+
+    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
+    static func_t _atomic_xchg_func;
+
+    static T atomic_xchg_init(T new_value, void* addr);
+
+    static inline T atomic_xchg(T new_value, void* addr) {
+      return _atomic_xchg_func(new_value, addr);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
+    static func_t _atomic_xchg_at_func;
+
+    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset);
+
+    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+      return _atomic_xchg_at_func(new_value, base, offset);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
+    static func_t _arraycopy_func;
+
+    static bool arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
+
+    static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      return _arraycopy_func(src_obj, dst_obj, src, dst, length);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
+    static func_t _clone_func;
+
+    static void clone_init(oop src, oop dst, size_t size);
+
+    static inline void clone(oop src, oop dst, size_t size) {
+      _clone_func(src, dst, size);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
+    static func_t _resolve_func;
+
+    static oop resolve_init(oop obj);
+
+    static inline oop resolve(oop obj) {
+      return _resolve_func(obj);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_EQUALS>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_EQUALS>::type func_t;
+    static func_t _equals_func;
+
+    static bool equals_init(oop o1, oop o2);
+
+    static inline bool equals(oop o1, oop o2) {
+      return _equals_func(o1, o2);
+    }
+  };
+
+  // Initialize the function pointers to point to the resolving function.
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_STORE>::type
+  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
+  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
+  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
+  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type
+  RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_EQUALS>::type
+  RuntimeDispatch<decorators, T, BARRIER_EQUALS>::_equals_func = &equals_init;
+
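+  // A sketch of what an *_init resolver is expected to do; the out-of-line
+  // definitions (cf. accessBackend.inline.hpp) follow this shape:
+  //
+  //   template <DecoratorSet decorators, typename T>
+  //   void RuntimeDispatch<decorators, T, BARRIER_STORE>::store_init(void* addr, T value) {
+  //     func_t resolved = /* resolve accessor for the configured GC barriers */;
+  //     _store_func = resolved; // patch the function pointer once...
+  //     resolved(addr, value);  // ...and perform this first access through it
+  //   }
+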
+  // Step 3: Pre-runtime dispatching.
+  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
+  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
+  // dispatch point. Otherwise it goes through a runtime check if hardwiring was
+  // not possible.
+  struct PreRuntimeDispatch: AllStatic {
+    template<DecoratorSet decorators>
+    struct CanHardwireRaw: public IntegralConstant<
+      bool,
+      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
+      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
+      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
+    {};
+
+    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
+
+    template<DecoratorSet decorators>
+    static bool is_hardwired_primitive() {
+      return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value &&
+             !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
+    store(void* addr, T value) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        Raw::oop_store(addr, value);
+      } else {
+        Raw::store(addr, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
+    store(void* addr, T value) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value>::type
+    store(void* addr, T value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+      } else {
+        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value>::type
+    store_at(oop base, ptrdiff_t offset, T value) {
+      store<decorators>(field_addr(base, offset), value);
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value>::type
+    store_at(oop base, ptrdiff_t offset, T value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
+      } else {
+        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
+    load(void* addr) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        return Raw::template oop_load<T>(addr);
+      } else {
+        return Raw::template load<T>(addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
+    load(void* addr) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    load(void* addr) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    load_at(oop base, ptrdiff_t offset) {
+      return load<decorators, T>(field_addr(base, offset));
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    load_at(oop base, ptrdiff_t offset) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
+    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+      } else {
+        return Raw::atomic_cmpxchg(new_value, addr, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
+    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
+    atomic_xchg(T new_value, void* addr) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        return Raw::oop_atomic_xchg(new_value, addr);
+      } else {
+        return Raw::atomic_xchg(new_value, addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
+    atomic_xchg(T new_value, void* addr) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg(T new_value, void* addr) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+      return atomic_xchg<decorators>(new_value, field_addr(base, offset));
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_value, base, offset);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
+    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
+      } else {
+        return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
+    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, bool>::type
+    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, dst_obj, src, dst, length);
+      }
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value>::type
+    clone(oop src, oop dst, size_t size) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      Raw::clone(src, dst, size);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value>::type
+    clone(oop src, oop dst, size_t size) {
+      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
+    resolve(oop obj) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      return Raw::resolve(obj);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
+    resolve(oop obj) {
+      return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, bool>::type
+    equals(oop o1, oop o2) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      return Raw::equals(o1, o2);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, bool>::type
+    equals(oop o1, oop o2) {
+      return RuntimeDispatch<decorators, oop, BARRIER_EQUALS>::equals(o1, o2);
+    }
+  };
+
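+  // Illustrative only (using the public wrappers from access.hpp): with AS_RAW
+  // the dispatch above collapses statically, e.g.
+  //
+  //   RawAccess<>::store(addr, value);  // hardwired raw store, no dispatch point
+  //   HeapAccess<>::store(addr, value); // may end up in RuntimeDispatch
+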
+  // This class adds implied decorators that follow according to decorator rules.
+  // For example adding default reference strength and default memory ordering
+  // semantics.
+  template <DecoratorSet input_decorators>
+  struct DecoratorFixup: AllStatic {
+    // If no reference strength has been picked, strong (ON_STRONG_OOP_REF) will
+    // be picked for oop accesses
+    static const DecoratorSet ref_strength_default = input_decorators |
+      (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
+       ON_STRONG_OOP_REF : INTERNAL_EMPTY);
+    // If no memory ordering has been picked, unordered will be picked
+    static const DecoratorSet memory_ordering_default = ref_strength_default |
+      ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
+    // If no barrier strength has been picked, normal will be used
+    static const DecoratorSet barrier_strength_default = memory_ordering_default |
+      ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
+    // Heap array accesses imply it is a heap access
+    static const DecoratorSet heap_array_is_in_heap = barrier_strength_default |
+      ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
+    static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
+      ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
+    static const DecoratorSet archive_root_is_root = conc_root_is_root |
+      ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
+    static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
+  };
+
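+  // For example (illustrative), a primitive access with no explicit decorators
+  // expands as follows:
+  //
+  //   STATIC_ASSERT(DecoratorFixup<INTERNAL_EMPTY>::value ==
+  //                 (MO_UNORDERED | AS_NORMAL | BT_BUILDTIME_DECORATORS));
+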
+  // Step 2: Reduce types.
+  // Enforce that for non-oop types, T and P have to be strictly the same.
+  // P is the type of the address and T is the type of the values.
+  // For oop types, it is allowed to pass T in {narrowOop, oop} and
+  // P in {narrowOop, oop, HeapWord*}. The following table shows which
+  // combinations are allowed. (columns are P, rows are T)
+  // |           | HeapWord  |   oop   | narrowOop |
+  // |   oop     |  rt-comp  | hw-none |  hw-comp  |
+  // | narrowOop |     x     |    x    |  hw-none  |
+  //
+  // x means not allowed
+  // rt-comp means it must be checked at runtime whether the oop is compressed.
+  // hw-none means it is statically known the oop will not be compressed.
+  // hw-comp means it is statically known the oop will be compressed.
+
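+  // For example, storing an oop to a narrowOop* destination selects the
+  // (narrowOop*, oop) overload below: the "hw-comp" case from the table,
+  // which statically adds the compressed oops decorators.
+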
+  template <DecoratorSet decorators, typename T>
+  inline void store_reduce_types(T* addr, T value) {
+    PreRuntimeDispatch::store<decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators>
+  inline void store_reduce_types(narrowOop* addr, oop value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators>
+  inline void store_reduce_types(narrowOop* addr, narrowOop value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators>
+  inline void store_reduce_types(HeapWord* addr, oop value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
+    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators>
+  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_cmpxchg_reduce_types(oop new_value,
+                                         HeapWord* addr,
+                                         oop compare_value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
+    return PreRuntimeDispatch::atomic_xchg<decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators>
+  inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T load_reduce_types(T* addr) {
+    return PreRuntimeDispatch::load<decorators, T>(addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline oop load_reduce_types(HeapWord* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+    return PreRuntimeDispatch::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
+  }
+
+  template <DecoratorSet decorators>
+  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+  }
+
+  template <DecoratorSet decorators>
+  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, narrowOop* src, narrowOop* dst, size_t length) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+  }
+
+  // Step 1: Set default decorators. This step remembers if a type was volatile
+  // and then sets the MO_VOLATILE decorator by default. Otherwise, a default
+  // memory ordering is set for the access, and the implied decorator rules
+  // are applied to select sensible defaults for decorators that have not been
+  // explicitly set. For example, default object referent strength is set to strong.
+  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
+  // and references from the types), and performs some verification that the
+  // passed in types make sense.
+
+  template <DecoratorSet decorators, typename T>
+  static void verify_types() {
+    // If this fails to compile, then you have sent in something that is
+    // not recognized as a valid primitive type to a primitive Access function.
+    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
+                   (IsPointer<T>::value || IsIntegral<T>::value) ||
+                    IsFloatingPoint<T>::value)); // not allowed primitive type
+  }
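+
+  // Illustrative only: verify_types<INTERNAL_EMPTY, jint>() compiles, whereas
+  // instantiating it with a non-oop class type would trip the STATIC_ASSERT.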
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline void store(P* addr, T value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT decayed_value = value;
+    // If a volatile address is passed in but no memory ordering decorator,
+    // set the memory ordering to MO_VOLATILE by default.
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_VOLATILE | decorators) : decorators>::value;
+    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
+  }
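+
+  // For example (illustrative), given "volatile jint* addr", a plain
+  //   AccessInternal::store<INTERNAL_EMPTY>(addr, value);
+  // becomes an MO_VOLATILE store, because P is deduced as "volatile jint" and
+  // no memory ordering decorator was given.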
+
+  template <DecoratorSet decorators, typename T>
+  inline void store_at(oop base, ptrdiff_t offset, T value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT decayed_value = value;
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline T load(P* addr) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
+                                 typename OopOrNarrowOop<T>::type,
+                                 typename Decay<T>::type>::type DecayedT;
+    // If a volatile address is passed in but no memory ordering decorator,
+    // set the memory ordering to MO_VOLATILE by default.
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_VOLATILE | decorators) : decorators>::value;
+    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T load_at(oop base, ptrdiff_t offset) {
+    verify_types<decorators, T>();
+    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
+                                 typename OopOrNarrowOop<T>::type,
+                                 typename Decay<T>::type>::type DecayedT;
+    // Expand the decorators (figure out sensible defaults)
+    // Potentially remember if we need compressed oop awareness
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    DecayedT compare_decayed_value = compare_value;
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_SEQ_CST | decorators) : decorators>::value;
+    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
+                                                            const_cast<DecayedP*>(addr),
+                                                            compare_decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    DecayedT compare_decayed_value = compare_value;
+    // Determine default memory ordering
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_SEQ_CST | decorators) : decorators>::value;
+    // Potentially remember that we need compressed oop awareness
+    const DecoratorSet final_decorators = expanded_decorators |
+                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                           INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY);
+    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
+                                                                   offset, compare_decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline T atomic_xchg(T new_value, P* addr) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    // atomic_xchg is only available in SEQ_CST flavour.
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
+    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
+                                                         const_cast<DecayedP*>(addr));
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    // atomic_xchg is only available in SEQ_CST flavour.
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
+                   (IsSame<T, void>::value || IsIntegral<T>::value) ||
+                    IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
+    typedef typename Decay<T>::type DecayedT;
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP>::value;
+    return arraycopy_reduce_types<expanded_decorators>(src_obj, dst_obj,
+                                                       const_cast<DecayedT*>(src),
+                                                       const_cast<DecayedT*>(dst),
+                                                       length);
+  }
+
+  template <DecoratorSet decorators>
+  inline void clone(oop src, oop dst, size_t size) {
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
+    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop resolve(oop obj) {
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
+    return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
+  }
+
+  template <DecoratorSet decorators>
+  inline bool equals(oop o1, oop o2) {
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
+    return PreRuntimeDispatch::equals<expanded_decorators>(o1, o2);
+  }
+
+  // Infer the type that should be returned from an Access::oop_load.
+  template <typename P, DecoratorSet decorators>
+  class OopLoadProxy: public StackObj {
+  private:
+    P *const _addr;
+  public:
+    OopLoadProxy(P* addr) : _addr(addr) {}
+
+    inline operator oop() {
+      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
+    }
+
+    inline operator narrowOop() {
+      return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
+    }
+
+    template <typename T>
+    inline bool operator ==(const T& other) const {
+      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
+    }
+
+    template <typename T>
+    inline bool operator !=(const T& other) const {
+      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
+    }
+  };
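+
+  // This is what makes expressions like the following work without spelling out
+  // the loaded type (illustrative, via the wrappers in access.hpp):
+  //
+  //   oop o = RawAccess<>::oop_load(addr);        // uses operator oop()
+  //   if (RawAccess<>::oop_load(addr) == NULL) {} // uses operator ==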
+
+  // Infer the type that should be returned from an Access::load_at.
+  template <DecoratorSet decorators>
+  class LoadAtProxy: public StackObj {
+  private:
+    const oop _base;
+    const ptrdiff_t _offset;
+  public:
+    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}
+
+    template <typename T>
+    inline operator T() const {
+      return load_at<decorators, T>(_base, _offset);
+    }
+
+    template <typename T>
+    inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }
+
+    template <typename T>
+    inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
+  };
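+
+  // Illustrative only: the element type is inferred from the use site, e.g.
+  //
+  //   jint v = HeapAccess<>::load_at(obj, offset); // operator T() with T = jint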
+
+  // Infer the type that should be returned from an Access::oop_load_at.
+  template <DecoratorSet decorators>
+  class OopLoadAtProxy: public StackObj {
+  private:
+    const oop _base;
+    const ptrdiff_t _offset;
+  public:
+    OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}
+
+    inline operator oop() const {
+      return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
+    }
+
+    inline operator narrowOop() const {
+      return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
+    }
+
+    template <typename T>
+    inline bool operator ==(const T& other) const {
+      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
+    }
+
+    template <typename T>
+    inline bool operator !=(const T& other) const {
+      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
+    }
+  };
+}
+
+#endif // SHARE_OOPS_ACCESSBACKEND_HPP
--- a/src/hotspot/share/oops/accessBackend.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/accessBackend.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,8 @@
 
 #include "oops/access.hpp"
 #include "oops/accessBackend.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oopsHierarchy.hpp"
 
 template <DecoratorSet decorators>
 template <DecoratorSet idecorators, typename T>
@@ -35,9 +36,9 @@
   AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
 RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
   if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
-    return oopDesc::decode_heap_oop_not_null(value);
+    return CompressedOops::decode_not_null(value);
   } else {
-    return oopDesc::decode_heap_oop(value);
+    return CompressedOops::decode(value);
   }
 }
 
@@ -48,9 +49,9 @@
   typename HeapOopType<idecorators>::type>::type
 RawAccessBarrier<decorators>::encode_internal(T value) {
   if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
-    return oopDesc::encode_heap_oop_not_null(value);
+    return CompressedOops::encode_not_null(value);
   } else {
-    return oopDesc::encode_heap_oop(value);
+    return CompressedOops::encode(value);
   }
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/accessDecorators.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_OOPS_ACCESSDECORATORS_HPP
+#define SHARE_OOPS_ACCESSDECORATORS_HPP
+
+// A decorator is an attribute or property that affects the way a memory access is performed.
+// There are different groups of decorators. Some have to do with memory ordering; others with,
+// e.g., the strength of references, the strength of GC barriers, or whether compression should
+// be applied. Some decorators are set at build time, such as whether primitives require GC
+// barriers or not; others at call sites, such as whether an access is in the heap or not; and
+// others are resolved at runtime, such as GC-specific barriers and encoding/decoding compressed oops.
+typedef uint64_t DecoratorSet;
+
+// The HasDecorator trait helps determine at compile time whether a decorator set
+// has an intersection with a certain other decorator set.
+template <DecoratorSet decorators, DecoratorSet decorator>
+struct HasDecorator: public IntegralConstant<bool, (decorators & decorator) != 0> {};
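+
+// For example (illustrative):
+//   STATIC_ASSERT(HasDecorator<MO_RELAXED | IN_HEAP, MO_RELAXED>::value);
+//   STATIC_ASSERT(!HasDecorator<MO_RELAXED | IN_HEAP, AS_RAW>::value);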
+
+// == Internal Decorators - do not use ==
+// * INTERNAL_EMPTY: This is the name for the empty decorator set (in absence of other decorators).
+// * INTERNAL_CONVERT_COMPRESSED_OOP: This is an oop access that will require converting an oop
+//   to a narrowOop or vice versa, if UseCompressedOops is known to be set.
+// * INTERNAL_VALUE_IS_OOP: Remember that the involved access is on oop rather than primitive.
+const DecoratorSet INTERNAL_EMPTY                    = UCONST64(0);
+const DecoratorSet INTERNAL_CONVERT_COMPRESSED_OOP   = UCONST64(1) << 1;
+const DecoratorSet INTERNAL_VALUE_IS_OOP             = UCONST64(1) << 2;
+
+// == Internal build-time Decorators ==
+// * INTERNAL_BT_BARRIER_ON_PRIMITIVES: This is set in the barrierSetConfig.hpp file.
+// * INTERNAL_BT_TO_SPACE_INVARIANT: This is set in the barrierSetConfig.hpp file iff
+//   every GC bundled in the build is to-space invariant.
+const DecoratorSet INTERNAL_BT_BARRIER_ON_PRIMITIVES = UCONST64(1) << 3;
+const DecoratorSet INTERNAL_BT_TO_SPACE_INVARIANT    = UCONST64(1) << 4;
+
+// == Internal run-time Decorators ==
+// * INTERNAL_RT_USE_COMPRESSED_OOPS: This decorator will be set in runtime resolved
+//   access backends iff UseCompressedOops is true.
+const DecoratorSet INTERNAL_RT_USE_COMPRESSED_OOPS   = UCONST64(1) << 5;
+
+const DecoratorSet INTERNAL_DECORATOR_MASK           = INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_VALUE_IS_OOP |
+                                                       INTERNAL_BT_BARRIER_ON_PRIMITIVES | INTERNAL_RT_USE_COMPRESSED_OOPS;
+
+// == Memory Ordering Decorators ==
+// The memory ordering decorators can be described in the following way:
+// === Decorator Rules ===
+// The different types of memory ordering guarantees have a strict order of strength.
+// Explicitly specifying the stronger ordering implies that the guarantees of the weaker
+// property holds too. The names come from the C++11 atomic operations, and typically
+// have a JMM equivalent property.
+// The equivalence may be viewed like this:
+// MO_UNORDERED is equivalent to JMM plain.
+// MO_VOLATILE has no equivalence in JMM, because it's a C++ thing.
+// MO_RELAXED is equivalent to JMM opaque.
+// MO_ACQUIRE is equivalent to JMM acquire.
+// MO_RELEASE is equivalent to JMM release.
+// MO_SEQ_CST is equivalent to JMM volatile.
+//
+// === Stores ===
+//  * MO_UNORDERED (Default): No guarantees.
+//    - The compiler and hardware are free to reorder aggressively. And they will.
+//  * MO_VOLATILE: Volatile stores (in the C++ sense).
+//    - The stores are not reordered by the compiler (but possibly the HW) w.r.t. other
+//      volatile accesses in program order (but possibly non-volatile accesses).
+//  * MO_RELAXED: Relaxed atomic stores.
+//    - The stores are atomic.
+//    - Guarantees from volatile stores hold.
+//  * MO_RELEASE: Releasing stores.
+//    - The releasing store will make its preceding memory accesses observable to memory accesses
+//      subsequent to an acquiring load observing this releasing store.
+//    - Guarantees from relaxed stores hold.
+//  * MO_SEQ_CST: Sequentially consistent stores.
+//    - The stores are observed in the same order by MO_SEQ_CST loads on other processors
+//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
+//    - Guarantees from releasing stores hold.
+// === Loads ===
+//  * MO_UNORDERED (Default): No guarantees.
+//    - The compiler and hardware are free to reorder aggressively. And they will.
+//  * MO_VOLATILE: Volatile loads (in the C++ sense).
+//    - The loads are not reordered by the compiler (but possibly the HW) w.r.t. other
+//      volatile accesses in program order (but possibly non-volatile accesses).
+//  * MO_RELAXED: Relaxed atomic loads.
+//    - The loads are atomic.
+//    - Guarantees from volatile loads hold.
+//  * MO_ACQUIRE: Acquiring loads.
+//    - An acquiring load will make subsequent memory accesses observe the memory accesses
+//      preceding the releasing store that the acquiring load observed.
+//    - Guarantees from relaxed loads hold.
+//  * MO_SEQ_CST: Sequentially consistent loads.
+//    - These loads observe MO_SEQ_CST stores in the same order on other processors
+//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
+//    - Guarantees from acquiring loads hold.
+// === Atomic Cmpxchg ===
+//  * MO_RELAXED: Atomic but relaxed cmpxchg.
+//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold unconditionally.
+//  * MO_SEQ_CST: Sequentially consistent cmpxchg.
+//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold unconditionally.
+// === Atomic Xchg ===
+//  * MO_RELAXED: Atomic but relaxed atomic xchg.
+//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold.
+//  * MO_SEQ_CST: Sequentially consistent xchg.
+//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold.
+const DecoratorSet MO_UNORDERED      = UCONST64(1) << 6;
+const DecoratorSet MO_VOLATILE       = UCONST64(1) << 7;
+const DecoratorSet MO_RELAXED        = UCONST64(1) << 8;
+const DecoratorSet MO_ACQUIRE        = UCONST64(1) << 9;
+const DecoratorSet MO_RELEASE        = UCONST64(1) << 10;
+const DecoratorSet MO_SEQ_CST        = UCONST64(1) << 11;
+const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_VOLATILE | MO_RELAXED |
+                                       MO_ACQUIRE | MO_RELEASE | MO_SEQ_CST;
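+
+// For example (illustrative, using the wrappers in access.hpp), a relaxed
+// atomic load of a heap field:
+//   jint v = HeapAccess<MO_RELAXED>::load_at(obj, offset);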
+
+// === Barrier Strength Decorators ===
+// * AS_RAW: The access will translate into a raw memory access, hence ignoring all semantic concerns
+//   except memory ordering and compressed oops. This will bypass runtime function pointer dispatching
+//   in the pipeline and hardwire to raw accesses without going through the GC access barriers.
+//  - Accesses on oop* translate to raw memory accesses without runtime checks
+//  - Accesses on narrowOop* translate to encoded/decoded memory accesses without runtime checks
+//  - Accesses on HeapWord* translate to a runtime check choosing one of the above
+//  - Accesses on other types translate to raw memory accesses without runtime checks
+// * AS_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
+//   marking that the previous value is uninitialized nonsense rather than a real value.
+// * AS_NO_KEEPALIVE: The barrier is used only on oop references and will not keep any involved objects
+//   alive, regardless of the type of reference being accessed. It will however perform the memory access
+//   in a consistent way w.r.t. e.g. concurrent compaction, so that the right field is accessed,
+//   and maintain e.g. intergenerational or interregional pointers if applicable. This should be
+//   used with extreme caution in isolated scopes.
+// * AS_NORMAL: The accesses will be resolved to an accessor on the BarrierSet class, giving the
+//   responsibility of performing the access and what barriers to be performed to the GC. This is the default.
+//   Note that primitive accesses will only be resolved on the barrier set if the appropriate build-time
+//   decorator for enabling primitive barriers is enabled for the build.
+const DecoratorSet AS_RAW                  = UCONST64(1) << 12;
+const DecoratorSet AS_DEST_NOT_INITIALIZED = UCONST64(1) << 13;
+const DecoratorSet AS_NO_KEEPALIVE         = UCONST64(1) << 14;
+const DecoratorSet AS_NORMAL               = UCONST64(1) << 15;
+const DecoratorSet AS_DECORATOR_MASK       = AS_RAW | AS_DEST_NOT_INITIALIZED |
+                                             AS_NO_KEEPALIVE | AS_NORMAL;
+
+// === Reference Strength Decorators ===
+// These decorators only apply to accesses on oop-like types (oop/narrowOop).
+// * ON_STRONG_OOP_REF: Memory access is performed on a strongly reachable reference.
+// * ON_WEAK_OOP_REF: The memory access is performed on a weakly reachable reference.
+// * ON_PHANTOM_OOP_REF: The memory access is performed on a phantomly reachable reference.
+//   This is the same ring of strength as jweak and weak oops in the VM.
+// * ON_UNKNOWN_OOP_REF: The memory access is performed on a reference of unknown strength.
+//   This could for example come from the unsafe API.
+// * Default (no explicit reference strength specified): ON_STRONG_OOP_REF
+const DecoratorSet ON_STRONG_OOP_REF  = UCONST64(1) << 16;
+const DecoratorSet ON_WEAK_OOP_REF    = UCONST64(1) << 17;
+const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 18;
+const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 19;
+const DecoratorSet ON_DECORATOR_MASK  = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
+                                        ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF;
+
+// === Access Location ===
+// Accesses can take place in, e.g. the heap, old or young generation and different native roots.
+// The location is important to the GC as it may imply different actions. The following decorators are used:
+// * IN_HEAP: The access is performed in the heap. Many barriers such as card marking will
+//   be omitted if this decorator is not set.
+// * IN_HEAP_ARRAY: The access is performed on a heap-allocated array. This is sometimes a special
+//   case for some GCs, and implies IN_HEAP.
+// * IN_ROOT: The access is performed in an off-heap data structure pointing into the Java heap.
+// * IN_CONCURRENT_ROOT: The access is performed in an off-heap data structure pointing into the Java heap,
+//   but is notably not scanned during safepoints. This is sometimes a special case for some GCs and
+//   implies that it is also an IN_ROOT.
+// * IN_ARCHIVE_ROOT: The access is performed in a CDS archive root pointing into the Java heap;
+//   it implies that it is also an IN_ROOT.
+const DecoratorSet IN_HEAP            = UCONST64(1) << 20;
+const DecoratorSet IN_HEAP_ARRAY      = UCONST64(1) << 21;
+const DecoratorSet IN_ROOT            = UCONST64(1) << 22;
+const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 23;
+const DecoratorSet IN_ARCHIVE_ROOT    = UCONST64(1) << 24;
+const DecoratorSet IN_DECORATOR_MASK  = IN_HEAP | IN_HEAP_ARRAY |
+                                        IN_ROOT | IN_CONCURRENT_ROOT |
+                                        IN_ARCHIVE_ROOT;
+
+// == Value Decorators ==
+// * OOP_NOT_NULL: This property can make certain barriers faster such as compressing oops.
+const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 25;
+const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
+
+// == Arraycopy Decorators ==
+// * ARRAYCOPY_CHECKCAST: This property means that the classes of the objects in the source
+//   array are not guaranteed to be subclasses of the class of the destination array. This requires
+//   a check-cast barrier during the copying operation. If this is not set, it is assumed
+//   that the arrays are covariant (the source array type is-a destination array type).
+// * ARRAYCOPY_DISJOINT: This property means that it is known that the two array ranges
+//   are disjoint.
+// * ARRAYCOPY_ARRAYOF: The copy is in the arrayof form.
+// * ARRAYCOPY_ATOMIC: The accesses have to be atomic over the size of its elements.
+// * ARRAYCOPY_ALIGNED: The accesses have to be aligned on a HeapWord.
+const DecoratorSet ARRAYCOPY_CHECKCAST            = UCONST64(1) << 26;
+const DecoratorSet ARRAYCOPY_DISJOINT             = UCONST64(1) << 27;
+const DecoratorSet ARRAYCOPY_ARRAYOF              = UCONST64(1) << 28;
+const DecoratorSet ARRAYCOPY_ATOMIC               = UCONST64(1) << 29;
+const DecoratorSet ARRAYCOPY_ALIGNED              = UCONST64(1) << 30;
+const DecoratorSet ARRAYCOPY_DECORATOR_MASK       = ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
+                                                    ARRAYCOPY_ARRAYOF | ARRAYCOPY_ATOMIC |
+                                                    ARRAYCOPY_ALIGNED;
+
+#endif // SHARE_OOPS_ACCESSDECORATORS_HPP
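
The decorator sets above are plain bit flags, so category membership reduces to a bitwise AND against the corresponding *_DECORATOR_MASK. The following standalone C++ toy (flag names invented for illustration, not the HotSpot Access API) sketches that membership-test pattern:

  #include <cstdint>
  #include <cassert>

  typedef uint64_t DecoratorSet;

  // Toy flags mirroring the category layout above: two reference strengths
  // and one location, with a mask carving out the strength category.
  const DecoratorSet MY_ON_STRONG = DecoratorSet(1) << 0;
  const DecoratorSet MY_ON_WEAK   = DecoratorSet(1) << 1;
  const DecoratorSet MY_IN_HEAP   = DecoratorSet(1) << 2;
  const DecoratorSet MY_ON_MASK   = MY_ON_STRONG | MY_ON_WEAK;

  // Verify a call site picked at most one reference strength: the selected
  // bits must be zero or a single power of two.
  template <DecoratorSet decorators>
  void check_strength() {
    DecoratorSet strength = decorators & MY_ON_MASK;
    assert((strength & (strength - 1)) == 0 && "at most one strength decorator");
  }

  int main() {
    check_strength<MY_ON_WEAK | MY_IN_HEAP>();  // one strength plus a location
    return 0;
  }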
--- a/src/hotspot/share/oops/arrayKlass.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/arrayKlass.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "jvmtifiles/jvmti.h"
 #include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
--- a/src/hotspot/share/oops/compiledICHolder.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/compiledICHolder.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,16 +24,14 @@
 
 #include "precompiled.hpp"
 #include "oops/compiledICHolder.hpp"
-#include "oops/klass.hpp"
-#include "oops/method.hpp"
 #include "runtime/atomic.hpp"
 
 volatile int CompiledICHolder::_live_count;
 volatile int CompiledICHolder::_live_not_claimed_count;
 
 
-CompiledICHolder::CompiledICHolder(Metadata* metadata, Klass* klass)
-  : _holder_metadata(metadata), _holder_klass(klass) {
+CompiledICHolder::CompiledICHolder(Metadata* metadata, Klass* klass, bool is_method)
+  : _holder_metadata(metadata), _holder_klass(klass), _is_metadata_method(is_method) {
 #ifdef ASSERT
   Atomic::inc(&_live_count);
   Atomic::inc(&_live_not_claimed_count);
@@ -47,22 +45,6 @@
 }
 #endif // ASSERT
 
-bool CompiledICHolder::is_loader_alive(BoolObjectClosure* is_alive) {
-  if (_holder_metadata->is_method()) {
-    if (!((Method*)_holder_metadata)->method_holder()->is_loader_alive(is_alive)) {
-      return false;
-    }
-  } else if (_holder_metadata->is_klass()) {
-    if (!((Klass*)_holder_metadata)->is_loader_alive(is_alive)) {
-      return false;
-    }
-  }
-  if (!_holder_klass->is_loader_alive(is_alive)) {
-    return false;
-  }
-  return true;
-}
-
 // Printing
 
 void CompiledICHolder::print_on(outputStream* st) const {
--- a/src/hotspot/share/oops/compiledICHolder.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/compiledICHolder.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,8 @@
 
 #include "oops/oop.hpp"
 #include "utilities/macros.hpp"
+#include "oops/klass.hpp"
+#include "oops/method.hpp"
 
 // A CompiledICHolder* is a helper object for the inline cache implementation.
 // It holds:
@@ -49,10 +51,11 @@
   Metadata* _holder_metadata;
   Klass*    _holder_klass;    // to avoid name conflict with oopDesc::_klass
   CompiledICHolder* _next;
+  bool _is_metadata_method;
 
  public:
   // Constructor
-  CompiledICHolder(Metadata* metadata, Klass* klass);
+  CompiledICHolder(Metadata* metadata, Klass* klass, bool is_method = true);
   ~CompiledICHolder() NOT_DEBUG_RETURN;
 
   static int live_count() { return _live_count; }
@@ -71,7 +74,16 @@
   CompiledICHolder* next()     { return _next; }
   void set_next(CompiledICHolder* n) { _next = n; }
 
-  bool is_loader_alive(BoolObjectClosure* is_alive);
+  inline bool is_loader_alive(BoolObjectClosure* is_alive) {
+    Klass* k = _is_metadata_method ? ((Method*)_holder_metadata)->method_holder() : (Klass*)_holder_metadata;
+    if (!k->is_loader_alive(is_alive)) {
+      return false;
+    }
+    if (!_holder_klass->is_loader_alive(is_alive)) {
+      return false;
+    }
+    return true;
+  }
 
   // Verify
   void verify_on(outputStream* st);
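
Caching _is_metadata_method at construction time means the GC liveness check above no longer has to ask the Metadata object whether it is a Method or a Klass on every sweep. A standalone toy of that trade-off (types invented for illustration, not HotSpot code):

  #include <cassert>

  struct Metadata   { virtual ~Metadata() {} virtual bool is_method() const = 0; };
  struct MethodLike : Metadata { bool is_method() const { return true; } };
  struct KlassLike  : Metadata { bool is_method() const { return false; } };

  // The discriminator is decided once at construction and stored as a flat
  // bool, so hot-path queries avoid the virtual call and type test.
  class Holder {
    Metadata* _metadata;
    bool      _is_method;
   public:
    Holder(Metadata* m, bool is_method) : _metadata(m), _is_method(is_method) {
      assert(m->is_method() == is_method && "flag must match the metadata");
    }
    bool is_method() const { return _is_method; }
  };

  int main() {
    MethodLike m;
    Holder h(&m, true);
    return h.is_method() ? 0 : 1;
  }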
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/compressedOops.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP
+#define SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP
+
+#include "gc/shared/collectedHeap.hpp"
+#include "memory/universe.hpp"
+#include "oops/oop.hpp"
+
+// Functions for encoding and decoding compressed oops.
+// If the oops are compressed, the type passed to these overloaded functions
+// is narrowOop.  All functions are overloaded so they can be called by
+// template functions without conditionals (the compiler instantiates via
+// the right type and inlines the appropriate code).
+
+// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
+// offset from the heap base.  Saving the check for null can save instructions
+// in inner GC loops so these are separated.
+
+namespace CompressedOops {
+  inline bool is_null(oop obj)       { return obj == NULL; }
+  inline bool is_null(narrowOop obj) { return obj == 0; }
+
+  inline oop decode_not_null(narrowOop v) {
+    assert(!is_null(v), "narrow oop value can never be zero");
+    address base = Universe::narrow_oop_base();
+    int    shift = Universe::narrow_oop_shift();
+    oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+    assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
+    return result;
+  }
+
+  inline oop decode(narrowOop v) {
+    return is_null(v) ? (oop)NULL : decode_not_null(v);
+  }
+
+  inline narrowOop encode_not_null(oop v) {
+    assert(!is_null(v), "oop value can never be zero");
+    assert(check_obj_alignment(v), "Address not aligned");
+    assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
+    address base = Universe::narrow_oop_base();
+    int    shift = Universe::narrow_oop_shift();
+    uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
+    assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
+    uint64_t result = pd >> shift;
+    assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
+    assert(decode(result) == v, "reversibility");
+    return (narrowOop)result;
+  }
+
+  inline narrowOop encode(oop v) {
+    return is_null(v) ? (narrowOop)0 : encode_not_null(v);
+  }
+
+  // No conversions needed for these overloads
+  inline oop decode_not_null(oop v)             { return v; }
+  inline oop decode(oop v)                      { return v; }
+  inline narrowOop encode_not_null(narrowOop v) { return v; }
+  inline narrowOop encode(narrowOop v)          { return v; }
+}
+
+#endif // SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP
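
The encode/decode pair above is base-plus-shifted-offset compression: a 64-bit heap address becomes a 32-bit offset from the heap base, right-shifted by the object-alignment shift. A standalone toy of the same round trip (base, shift, and the sample address are made up for illustration):

  #include <cstdint>
  #include <cassert>
  #include <cstdio>

  static const uintptr_t kBase  = 0x100000000ULL;  // hypothetical heap base
  static const int       kShift = 3;               // 8-byte object alignment

  static uint32_t encode(uintptr_t p) {
    return p == 0 ? 0 : (uint32_t)((p - kBase) >> kShift);
  }

  static uintptr_t decode(uint32_t v) {
    return v == 0 ? 0 : kBase + ((uintptr_t)v << kShift);
  }

  int main() {
    uintptr_t obj = kBase + 0x12340;      // an aligned address inside the "heap"
    assert(decode(encode(obj)) == obj);   // reversibility, as the assert above checks
    printf("narrow = 0x%x\n", encode(obj));
    return 0;
  }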
--- a/src/hotspot/share/oops/constMethod.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/constMethod.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/heapInspection.hpp"
 #include "memory/metadataFactory.hpp"
@@ -31,6 +30,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/constMethod.hpp"
 #include "oops/method.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/align.hpp"
 
 // Static initialization
--- a/src/hotspot/share/oops/constantPool.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/constantPool.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -841,7 +841,7 @@
   if (cache_index >= 0) {
     result_oop = this_cp->resolved_references()->obj_at(cache_index);
     if (result_oop != NULL) {
-      if (result_oop == Universe::the_null_sentinel()) {
+      if (oopDesc::equals(result_oop, Universe::the_null_sentinel())) {
         DEBUG_ONLY(int temp_index = (index >= 0 ? index : this_cp->object_to_cp_index(cache_index)));
         assert(this_cp->tag_at(temp_index).is_dynamic_constant(), "only condy uses the null sentinel");
         result_oop = NULL;
@@ -1074,12 +1074,12 @@
     } else {
       // Return the winning thread's result.  This can be different than
       // the result here for MethodHandles.
-      if (old_result == Universe::the_null_sentinel())
+      if (oopDesc::equals(old_result, Universe::the_null_sentinel()))
         old_result = NULL;
       return old_result;
     }
   } else {
-    assert(result_oop != Universe::the_null_sentinel(), "");
+    assert(!oopDesc::equals(result_oop, Universe::the_null_sentinel()), "");
     return result_oop;
   }
 }
@@ -1245,7 +1245,7 @@
 oop ConstantPool::string_at_impl(const constantPoolHandle& this_cp, int which, int obj_index, TRAPS) {
   // If the string has already been interned, this entry will be non-null
   oop str = this_cp->resolved_references()->obj_at(obj_index);
-  assert(str != Universe::the_null_sentinel(), "");
+  assert(!oopDesc::equals(str, Universe::the_null_sentinel()), "");
   if (str != NULL) return str;
   Symbol* sym = this_cp->unresolved_string_at(which);
   str = StringTable::intern(sym, CHECK_(NULL));
--- a/src/hotspot/share/oops/cpCache.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/cpCache.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,6 +27,7 @@
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/linkResolver.hpp"
 #include "interpreter/rewriter.hpp"
 #include "logging/log.hpp"
 #include "memory/metadataFactory.hpp"
--- a/src/hotspot/share/oops/instanceKlass.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -2401,7 +2401,7 @@
   // and package entries. Both must be the same. This rule
   // applies even to classes that are defined in the unnamed
   // package, they still must have the same class loader.
-  if ((classloader1 == classloader2) && (classpkg1 == classpkg2)) {
+  if (oopDesc::equals(classloader1, classloader2) && (classpkg1 == classpkg2)) {
     return true;
   }
 
@@ -2412,7 +2412,7 @@
 // and classname information is enough to determine a class's package
 bool InstanceKlass::is_same_class_package(oop other_class_loader,
                                           const Symbol* other_class_name) const {
-  if (class_loader() != other_class_loader) {
+  if (!oopDesc::equals(class_loader(), other_class_loader)) {
     return false;
   }
   if (name()->fast_compare(other_class_name) == 0) {
@@ -3210,7 +3210,7 @@
 class VerifyFieldClosure: public OopClosure {
  protected:
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     if (!oopDesc::is_oop_or_null(obj)) {
       tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p2i(p), p2i(obj));
       Universe::print_on(tty);
--- a/src/hotspot/share/oops/instanceKlass.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/instanceKlass.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -250,6 +250,7 @@
   u1              _init_state;                    // state of class
   u1              _reference_type;                // reference type
 
+  u2              _this_class_index;              // constant pool entry
 #if INCLUDE_JVMTI
   JvmtiCachedClassFieldMap* _jvmti_cached_class_field_map;  // JVMTI: used during heap iteration
 #endif
@@ -516,6 +517,10 @@
     _reference_type = (u1)t;
   }
 
+  // this class cp index
+  u2 this_class_index() const             { return _this_class_index; }
+  void set_this_class_index(u2 index)     { _this_class_index = index; }
+
   static ByteSize reference_type_offset() { return in_ByteSize(offset_of(InstanceKlass, _reference_type)); }
 
   // find local field, returns true if found
--- a/src/hotspot/share/oops/instanceRefKlass.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/instanceRefKlass.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,6 +28,8 @@
 #include "classfile/javaClasses.inline.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
@@ -63,9 +65,9 @@
 bool InstanceRefKlass::try_discover(oop obj, ReferenceType type, OopClosureType* closure) {
   ReferenceProcessor* rp = closure->ref_processor();
   if (rp != NULL) {
-    T referent_oop = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::referent_addr_raw(obj));
-    if (!oopDesc::is_null(referent_oop)) {
-      oop referent = oopDesc::decode_heap_oop_not_null(referent_oop);
+    T referent_oop = RawAccess<>::oop_load((T*)java_lang_ref_Reference::referent_addr_raw(obj));
+    if (!CompressedOops::is_null(referent_oop)) {
+      oop referent = CompressedOops::decode_not_null(referent_oop);
       if (!referent->is_gc_marked()) {
         // Only try to discover if not yet marked.
         return rp->discover_reference(obj, type);
@@ -86,8 +88,8 @@
   do_referent<nv, T>(obj, closure, contains);
 
   // Treat discovered as normal oop, if ref is not "active" (next non-NULL).
-  T next_oop  = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::next_addr_raw(obj));
-  if (!oopDesc::is_null(next_oop)) {
+  T next_oop  = RawAccess<>::oop_load((T*)java_lang_ref_Reference::next_addr_raw(obj));
+  if (!CompressedOops::is_null(next_oop)) {
     do_discovered<nv, T>(obj, closure, contains);
   }
 
@@ -195,11 +197,11 @@
 
   log_develop_trace(gc, ref)("InstanceRefKlass %s for obj " PTR_FORMAT, s, p2i(obj));
   log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-      p2i(referent_addr), p2i(referent_addr ? (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL));
+      p2i(referent_addr), p2i(referent_addr ? RawAccess<>::oop_load(referent_addr) : (oop)NULL));
   log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-      p2i(next_addr), p2i(next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL));
+      p2i(next_addr), p2i(next_addr ? RawAccess<>::oop_load(next_addr) : (oop)NULL));
   log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-      p2i(discovered_addr), p2i(discovered_addr ? (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL));
+      p2i(discovered_addr), p2i(discovered_addr ? RawAccess<>::oop_load(discovered_addr) : (oop)NULL));
 }
 #endif
 
--- a/src/hotspot/share/oops/klass.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/klass.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -35,6 +35,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -569,7 +570,7 @@
 oop Klass::archived_java_mirror_raw() {
   assert(DumpSharedSpaces, "called only during runtime");
   assert(has_raw_archived_mirror(), "must have raw archived mirror");
-  return oopDesc::decode_heap_oop(_archived_mirror);
+  return CompressedOops::decode(_archived_mirror);
 }
 
 // Used at CDS runtime to get the archived mirror from shared class. Uses GC barrier.
@@ -582,7 +583,7 @@
 // No GC barrier
 void Klass::set_archived_java_mirror_raw(oop m) {
   assert(DumpSharedSpaces, "called only during runtime");
-  _archived_mirror = oopDesc::encode_heap_oop(m);
+  _archived_mirror = CompressedOops::encode(m);
 }
 #endif // INCLUDE_CDS_JAVA_HEAP
 
--- a/src/hotspot/share/oops/klass.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/klass.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -447,10 +447,6 @@
     }
   }
 
-  // Is an oop/narrowOop null or subtype of this Klass?
-  template <typename T>
-  bool is_instanceof_or_null(T element);
-
   bool search_secondary_supers(Klass* k) const;
 
   // Find LCA in class hierarchy
--- a/src/hotspot/share/oops/klass.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/klass.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -71,13 +71,4 @@
   return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
 }
 
-template <typename T>
-bool Klass::is_instanceof_or_null(T element) {
-  if (oopDesc::is_null(element)) {
-    return true;
-  }
-  oop obj = oopDesc::decode_heap_oop_not_null(element);
-  return obj->klass()->is_subtype_of(this);
-}
-
 #endif // SHARE_VM_OOPS_KLASS_INLINE_HPP
--- a/src/hotspot/share/oops/klassVtable.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/klassVtable.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,7 +26,7 @@
 #include "jvm.h"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "gc/shared/gcLocker.hpp"
+#include "interpreter/linkResolver.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/metaspaceShared.hpp"
@@ -39,6 +39,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/copy.hpp"
 
 inline InstanceKlass* klassVtable::ik() const {
@@ -496,7 +497,7 @@
           // to link to the first super, and we get all the others.
           Handle super_loader(THREAD, super_klass->class_loader());
 
-          if (target_loader() != super_loader()) {
+          if (!oopDesc::equals(target_loader(), super_loader())) {
             ResourceMark rm(THREAD);
             Symbol* failed_type_symbol =
               SystemDictionary::check_signature_loaders(signature, target_loader,
@@ -1225,7 +1226,7 @@
       // if checkconstraints requested
       if (checkconstraints) {
         Handle method_holder_loader (THREAD, target->method_holder()->class_loader());
-        if (method_holder_loader() != interface_loader()) {
+        if (!oopDesc::equals(method_holder_loader(), interface_loader())) {
           ResourceMark rm(THREAD);
           Symbol* failed_type_symbol =
             SystemDictionary::check_signature_loaders(m->signature(),
--- a/src/hotspot/share/oops/klassVtable.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/klassVtable.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_OOPS_KLASSVTABLE_HPP
 #define SHARE_VM_OOPS_KLASSVTABLE_HPP
 
-#include "memory/allocation.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "runtime/handles.hpp"
 #include "utilities/growableArray.hpp"
--- a/src/hotspot/share/oops/method.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/method.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,7 +28,6 @@
 #include "code/codeCache.hpp"
 #include "code/debugInfoRec.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/generation.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodeTracer.hpp"
@@ -58,6 +57,7 @@
 #include "runtime/init.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/relocator.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "utilities/align.hpp"
@@ -2372,9 +2372,9 @@
     ptr = ptr->_next;
   }
   TouchedMethodRecord* nptr = NEW_C_HEAP_OBJ(TouchedMethodRecord, mtTracing);
-  my_class->set_permanent();  // prevent reclaimed by GC
-  my_name->set_permanent();
-  my_sig->set_permanent();
+  my_class->increment_refcount();
+  my_name->increment_refcount();
+  my_sig->increment_refcount();
   nptr->_class_name         = my_class;
   nptr->_method_name        = my_name;
   nptr->_method_signature   = my_sig;
--- a/src/hotspot/share/oops/methodData.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/methodData.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "compiler/compilerOracle.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecode.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/linkResolver.hpp"
@@ -39,6 +38,7 @@
 #include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
 
--- a/src/hotspot/share/oops/objArrayKlass.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/objArrayKlass.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -220,7 +220,7 @@
 // Either oop or narrowOop depending on UseCompressedOops.
 template <class T> void ObjArrayKlass::do_copy(arrayOop s, T* src,
                                arrayOop d, T* dst, int length, TRAPS) {
-  if (s == d) {
+  if (oopDesc::equals(s, d)) {
     // since source and destination are equal we do not need conversion checks.
     assert(length > 0, "sanity check");
     HeapAccess<>::oop_arraycopy(s, d, src, dst, length);
--- a/src/hotspot/share/oops/oop.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/oop.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,6 +26,7 @@
 #include "classfile/altHashing.hpp"
 #include "classfile/javaClasses.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/verifyOopClosure.hpp"
 #include "runtime/handles.inline.hpp"
@@ -155,7 +156,7 @@
 VerifyOopClosure VerifyOopClosure::verify_oop;
 
 template <class T> void VerifyOopClosure::do_oop_work(T* p) {
-  oop obj = oopDesc::load_decode_heap_oop(p);
+  oop obj = RawAccess<>::oop_load(p);
   guarantee(oopDesc::is_oop_or_null(obj), "invalid oop: " INTPTR_FORMAT, p2i((oopDesc*) obj));
 }
 
--- a/src/hotspot/share/oops/oop.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/oop.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -127,9 +127,6 @@
   // Need this as public for garbage collection.
   template <class T> inline T* obj_field_addr_raw(int offset) const;
 
-  inline static bool is_null(oop obj)       { return obj == NULL; }
-  inline static bool is_null(narrowOop obj) { return obj == 0; }
-
   // Standard compare function returns negative value if o1 < o2
   //                                   0              if o1 == o2
   //                                   positive value if o1 > o2
@@ -145,40 +142,7 @@
     }
   }
 
-  // Decode an oop pointer from a narrowOop if compressed.
-  // These are overloaded for oop and narrowOop as are the other functions
-  // below so that they can be called in template functions.
-  static inline oop decode_heap_oop_not_null(oop v) { return v; }
-  static inline oop decode_heap_oop_not_null(narrowOop v);
-  static inline oop decode_heap_oop(oop v) { return v; }
-  static inline oop decode_heap_oop(narrowOop v);
-
-  // Encode an oop pointer to a narrow oop. The or_null versions accept
-  // null oop pointer, others do not in order to eliminate the
-  // null checking branches.
-  static inline narrowOop encode_heap_oop_not_null(oop v);
-  static inline narrowOop encode_heap_oop(oop v);
-
-  // Load an oop out of the Java heap as is without decoding.
-  // Called by GC to check for null before decoding.
-  static inline narrowOop load_heap_oop(narrowOop* p);
-  static inline oop       load_heap_oop(oop* p);
-
-  // Load an oop out of Java heap and decode it to an uncompressed oop.
-  static inline oop load_decode_heap_oop_not_null(narrowOop* p);
-  static inline oop load_decode_heap_oop_not_null(oop* p);
-  static inline oop load_decode_heap_oop(narrowOop* p);
-  static inline oop load_decode_heap_oop(oop* p);
-
-  // Store already encoded heap oop into the heap.
-  static inline void store_heap_oop(narrowOop* p, narrowOop v);
-  static inline void store_heap_oop(oop* p, oop v);
-
-  // Encode oop if UseCompressedOops and store into the heap.
-  static inline void encode_store_heap_oop_not_null(narrowOop* p, oop v);
-  static inline void encode_store_heap_oop_not_null(oop* p, oop v);
-  static inline void encode_store_heap_oop(narrowOop* p, oop v);
-  static inline void encode_store_heap_oop(oop* p, oop v);
+  inline static bool equals(oop o1, oop o2) { return Access<>::equals(o1, o2); }
 
   // Access to fields in a instanceOop through these methods.
   template <DecoratorSet decorator>
@@ -347,6 +311,8 @@
   inline int oop_iterate_no_header(OopClosure* bk);
   inline int oop_iterate_no_header(OopClosure* bk, MemRegion mr);
 
+  inline static bool is_instanceof_or_null(oop obj, Klass* klass);
+
   // identity hash; returns the identity hash key (computes it if necessary)
   // NOTE with the introduction of UseBiasedLocking that identity_hash() might reach a
   // safepoint if called on a biased object. Calling code must be aware of that.
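
Routing oop comparison through Access<>::equals instead of a naked == presumably lets a barrier set normalize both operands first, e.g. by following forwarding pointers in a collector that moves objects while the application runs. A toy model of that idea (the forwarding scheme here is invented for illustration):

  #include <cassert>
  #include <cstddef>

  struct Obj { Obj* forwardee; };  // toy header: non-NULL once the object has moved

  // Chase forwarding pointers so two references to the same (possibly
  // relocated) object resolve to one canonical address.
  static Obj* resolve(Obj* o) {
    while (o != NULL && o->forwardee != NULL) {
      o = o->forwardee;
    }
    return o;
  }

  static bool equals(Obj* a, Obj* b) {
    return resolve(a) == resolve(b);  // barrier-mediated comparison
  }

  int main() {
    Obj to   = { NULL };
    Obj from = { &to };          // "from" was evacuated to "to"
    assert(equals(&from, &to));  // a naked pointer == would report false
    return 0;
  }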
--- a/src/hotspot/share/oops/oop.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/oop.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,12 +26,12 @@
 #define SHARE_VM_OOPS_OOP_INLINE_HPP
 
 #include "gc/shared/ageTable.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/generation.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/arrayKlass.hpp"
 #include "oops/arrayOop.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/markOop.inline.hpp"
 #include "oops/oop.hpp"
@@ -136,7 +136,7 @@
   // This is only to be used during GC, for from-space objects, so no
   // barrier is needed.
   if (UseCompressedClassPointers) {
-    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
+    _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k);  // may be null (parnew overflow handling)
   } else {
     _metadata._klass = (Klass*)(address)k;
   }
@@ -145,7 +145,7 @@
 oop oopDesc::list_ptr_from_klass() {
   // This is only to be used during GC, for from-space objects.
   if (UseCompressedClassPointers) {
-    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
+    return CompressedOops::decode((narrowOop)_metadata._compressed_klass);
   } else {
     // Special case for GC
     return (oop)(address)_metadata._klass;
@@ -239,83 +239,6 @@
 template <class T>
 T*       oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }
 
-// Functions for getting and setting oops within instance objects.
-// If the oops are compressed, the type passed to these overloaded functions
-// is narrowOop.  All functions are overloaded so they can be called by
-// template functions without conditionals (the compiler instantiates via
-// the right type and inlines the appopriate code).
-
-// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
-// offset from the heap base.  Saving the check for null can save instructions
-// in inner GC loops so these are separated.
-
-inline bool check_obj_alignment(oop obj) {
-  return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
-}
-
-oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
-  assert(!is_null(v), "narrow oop value can never be zero");
-  address base = Universe::narrow_oop_base();
-  int    shift = Universe::narrow_oop_shift();
-  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
-  assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
-  return result;
-}
-
-oop oopDesc::decode_heap_oop(narrowOop v) {
-  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
-}
-
-narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
-  assert(!is_null(v), "oop value can never be zero");
-  assert(check_obj_alignment(v), "Address not aligned");
-  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
-  address base = Universe::narrow_oop_base();
-  int    shift = Universe::narrow_oop_shift();
-  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
-  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
-  uint64_t result = pd >> shift;
-  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
-  assert(decode_heap_oop(result) == v, "reversibility");
-  return (narrowOop)result;
-}
-
-narrowOop oopDesc::encode_heap_oop(oop v) {
-  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
-}
-
-narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }
-oop       oopDesc::load_heap_oop(oop* p)       { return *p; }
-
-void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
-void oopDesc::store_heap_oop(oop* p, oop v)             { *p = v; }
-
-// Load and decode an oop out of the Java heap into a wide oop.
-oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
-  return decode_heap_oop_not_null(load_heap_oop(p));
-}
-
-// Load and decode an oop out of the heap accepting null
-oop oopDesc::load_decode_heap_oop(narrowOop* p) {
-  return decode_heap_oop(load_heap_oop(p));
-}
-
-oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
-oop oopDesc::load_decode_heap_oop(oop* p)          { return *p; }
-
-void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
-void oopDesc::encode_store_heap_oop(oop* p, oop v)          { *p = v; }
-
-// Encode and store a heap oop.
-void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
-  *p = encode_heap_oop_not_null(v);
-}
-
-// Encode and store a heap oop allowing for null.
-void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
-  *p = encode_heap_oop(v);
-}
-
 template <DecoratorSet decorators>
 inline oop  oopDesc::obj_field_access(int offset) const             { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
 inline oop  oopDesc::obj_field(int offset) const                    { return HeapAccess<>::oop_load_at(as_oop(), offset);  }
@@ -525,6 +448,10 @@
 ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)
 
+bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
+  return obj == NULL || obj->klass()->is_subtype_of(klass);
+}
+
 intptr_t oopDesc::identity_hash() {
   // Fast case; if the object is unlocked and the hash value is set, no locking is needed
   // Note: The mark must be read into local variable to avoid concurrent updates.
--- a/src/hotspot/share/oops/oopsHierarchy.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/oopsHierarchy.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -192,6 +192,10 @@
   return (T)(CHECK_UNHANDLED_OOPS_ONLY((void*))o);
 }
 
+inline bool check_obj_alignment(oop obj) {
+  return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
+}
+
 // The metadata hierarchy is separate from the oop hierarchy
 
 //      class MetaspaceObj
--- a/src/hotspot/share/oops/symbol.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/oops/symbol.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -165,13 +165,6 @@
   int refcount() const      { return _refcount; }
   void increment_refcount();
   void decrement_refcount();
-  // Set _refcount non zero to avoid being reclaimed by GC.
-  void set_permanent() {
-    assert(LogTouchedMethods, "Should not be called with LogTouchedMethods off");
-    if (_refcount != PERM_REFCOUNT) {
-      _refcount = PERM_REFCOUNT;
-    }
-  }
   bool is_permanent() {
     return (_refcount == PERM_REFCOUNT);
   }
--- a/src/hotspot/share/opto/loopTransform.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/opto/loopTransform.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -70,11 +70,20 @@
   // put body of outer strip mined loop on igvn work list as well
   if (_head->is_CountedLoop() && _head->as_Loop()->is_strip_mined()) {
     CountedLoopNode* l = _head->as_CountedLoop();
-    _phase->_igvn._worklist.push(l->outer_loop());
-    _phase->_igvn._worklist.push(l->outer_loop_tail());
-    _phase->_igvn._worklist.push(l->outer_loop_end());
-    _phase->_igvn._worklist.push(l->outer_safepoint());
+    Node* outer_loop = l->outer_loop();
+    assert(outer_loop != NULL, "missing piece of strip mined loop");
+    _phase->_igvn._worklist.push(outer_loop);
+    Node* outer_loop_tail = l->outer_loop_tail();
+    assert(outer_loop_tail != NULL, "missing piece of strip mined loop");
+    _phase->_igvn._worklist.push(outer_loop_tail);
+    Node* outer_loop_end = l->outer_loop_end();
+    assert(outer_loop_end != NULL, "missing piece of strip mined loop");
+    _phase->_igvn._worklist.push(outer_loop_end);
+    Node* outer_safepoint = l->outer_safepoint();
+    assert(outer_safepoint != NULL, "missing piece of strip mined loop");
+    _phase->_igvn._worklist.push(outer_safepoint);
     Node* cle_out = _head->as_CountedLoop()->loopexit()->proj_out(false);
+    assert(cle_out != NULL, "missing piece of strip mined loop");
     _phase->_igvn._worklist.push(cle_out);
   }
 }
--- a/src/hotspot/share/opto/runtime.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/opto/runtime.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -38,7 +38,7 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecode.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/linkResolver.hpp"
--- a/src/hotspot/share/opto/type.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/opto/type.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,7 +28,6 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "compiler/compileLog.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "libadt/dict.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
--- a/src/hotspot/share/prims/jni.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/jni.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -36,6 +36,7 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "gc/shared/gcLocker.inline.hpp"
 #include "interpreter/linkResolver.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
@@ -71,6 +72,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/reflection.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/thread.inline.hpp"
@@ -582,7 +584,7 @@
   oop super_mirror = JNIHandles::resolve_non_null(super);
   if (java_lang_Class::is_primitive(sub_mirror) ||
       java_lang_Class::is_primitive(super_mirror)) {
-    jboolean ret = (sub_mirror == super_mirror);
+    jboolean ret = oopDesc::equals(sub_mirror, super_mirror);
 
     HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(ret);
     return ret;
@@ -822,7 +824,7 @@
 
   oop a = JNIHandles::resolve(r1);
   oop b = JNIHandles::resolve(r2);
-  jboolean ret = (a == b) ? JNI_TRUE : JNI_FALSE;
+  jboolean ret = oopDesc::equals(a, b) ? JNI_TRUE : JNI_FALSE;
 
   HOTSPOT_JNI_ISSAMEOBJECT_RETURN(ret);
   return ret;
@@ -3144,6 +3146,24 @@
   }
 JNI_END
 
+static oop lock_gc_or_pin_object(JavaThread* thread, jobject obj) {
+  if (Universe::heap()->supports_object_pinning()) {
+    const oop o = JNIHandles::resolve_non_null(obj);
+    return Universe::heap()->pin_object(thread, o);
+  } else {
+    GCLocker::lock_critical(thread);
+    return JNIHandles::resolve_non_null(obj);
+  }
+}
+
+static void unlock_gc_or_unpin_object(JavaThread* thread, jobject obj) {
+  if (Universe::heap()->supports_object_pinning()) {
+    const oop o = JNIHandles::resolve_non_null(obj);
+    return Universe::heap()->unpin_object(thread, o);
+  } else {
+    GCLocker::unlock_critical(thread);
+  }
+}
 
 JNI_ENTRY(void*, jni_GetPrimitiveArrayCritical(JNIEnv *env, jarray array, jboolean *isCopy))
   JNIWrapper("GetPrimitiveArrayCritical");
@@ -3151,8 +3171,7 @@
   if (isCopy != NULL) {
     *isCopy = JNI_FALSE;
   }
-  oop a = JNIHandles::resolve_non_null(array);
-  a = Universe::heap()->pin_object(thread, a);
+  oop a = lock_gc_or_pin_object(thread, array);
   assert(a->is_array(), "just checking");
   BasicType type;
   if (a->is_objArray()) {
@@ -3169,8 +3188,7 @@
 JNI_ENTRY(void, jni_ReleasePrimitiveArrayCritical(JNIEnv *env, jarray array, void *carray, jint mode))
   JNIWrapper("ReleasePrimitiveArrayCritical");
   HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(env, array, carray, mode);
-  oop a = JNIHandles::resolve_non_null(array);
-  Universe::heap()->unpin_object(thread, a);
+  unlock_gc_or_unpin_object(thread, array);
 HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN();
 JNI_END
 
@@ -3178,8 +3196,7 @@
 JNI_ENTRY(const jchar*, jni_GetStringCritical(JNIEnv *env, jstring string, jboolean *isCopy))
   JNIWrapper("GetStringCritical");
   HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(env, string, (uintptr_t *) isCopy);
-  oop s = JNIHandles::resolve_non_null(string);
-  s = Universe::heap()->pin_object(thread, s);
+  oop s = lock_gc_or_pin_object(thread, string);
   typeArrayOop s_value = java_lang_String::value(s);
   bool is_latin1 = java_lang_String::is_latin1(s);
   if (isCopy != NULL) {
@@ -3216,7 +3233,7 @@
     // This assumes that ReleaseStringCritical bookends GetStringCritical.
     FREE_C_HEAP_ARRAY(jchar, chars);
   }
-  Universe::heap()->unpin_object(thread, s);
+  unlock_gc_or_unpin_object(thread, str);
 HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN();
 JNI_END
 
--- a/src/hotspot/share/prims/jvm.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/jvm.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1364,7 +1364,7 @@
       protection_domain = method->method_holder()->protection_domain();
     }
 
-    if ((previous_protection_domain != protection_domain) && (protection_domain != NULL)) {
+    if ((!oopDesc::equals(previous_protection_domain, protection_domain)) && (protection_domain != NULL)) {
       local_array->push(protection_domain);
       previous_protection_domain = protection_domain;
     }
--- a/src/hotspot/share/prims/jvmtiExport.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/jvmtiExport.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -54,6 +54,7 @@
 #include "runtime/objectMonitor.hpp"
 #include "runtime/objectMonitor.inline.hpp"
 #include "runtime/os.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
 #include "runtime/vframe.inline.hpp"
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -30,7 +30,6 @@
 #include "classfile/verifier.hpp"
 #include "code/codeCache.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "interpreter/rewriter.hpp"
 #include "logging/logStream.hpp"
@@ -50,6 +49,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/relocator.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/events.hpp"
 
--- a/src/hotspot/share/prims/jvmtiThreadState.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/jvmtiThreadState.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -23,12 +23,12 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jvmtiEventController.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "prims/jvmtiThreadState.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/vframe.hpp"
 
 // marker for when the stack depth has been reset and is now unknown.
--- a/src/hotspot/share/prims/methodComparator.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/methodComparator.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/resourceArea.hpp"
 #include "oops/constantPool.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
--- a/src/hotspot/share/prims/methodHandles.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/methodHandles.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -44,6 +44,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/timerTrace.hpp"
 #include "runtime/reflection.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/exceptions.hpp"
@@ -305,7 +306,7 @@
 
   Handle resolved_method = info.resolved_method_name();
   assert(java_lang_invoke_ResolvedMethodName::vmtarget(resolved_method()) == m(),
-         "Should not change after link resolultion");
+         "Should not change after link resolution");
 
   oop mname_oop = mname();
   java_lang_invoke_MemberName::set_flags  (mname_oop, flags);
@@ -681,7 +682,8 @@
 // An unresolved member name is a mere symbolic reference.
 // Resolving it plants a vmtarget/vmindex in it,
 // which refers directly to JVM internals.
-Handle MethodHandles::resolve_MemberName(Handle mname, Klass* caller, TRAPS) {
+Handle MethodHandles::resolve_MemberName(Handle mname, Klass* caller,
+                                         bool speculative_resolve, TRAPS) {
   Handle empty;
   assert(java_lang_invoke_MemberName::is_instance(mname()), "");
 
@@ -780,6 +782,9 @@
           assert(false, "ref_kind=%d", ref_kind);
         }
         if (HAS_PENDING_EXCEPTION) {
+          if (speculative_resolve) {
+            CLEAR_PENDING_EXCEPTION;
+          }
           return empty;
         }
       }
@@ -805,6 +810,9 @@
           break;                // will throw after end of switch
         }
         if (HAS_PENDING_EXCEPTION) {
+          if (speculative_resolve) {
+            CLEAR_PENDING_EXCEPTION;
+          }
           return empty;
         }
       }
@@ -821,6 +829,9 @@
         LinkInfo link_info(defc, name, type, caller, LinkInfo::skip_access_check);
         LinkResolver::resolve_field(result, link_info, Bytecodes::_nop, false, THREAD);
         if (HAS_PENDING_EXCEPTION) {
+          if (speculative_resolve) {
+            CLEAR_PENDING_EXCEPTION;
+          }
           return empty;
         }
       }
@@ -961,7 +972,7 @@
         if (!java_lang_invoke_MemberName::is_instance(result()))
           return -99;  // caller bug!
         oop saved = MethodHandles::init_field_MemberName(result, st.field_descriptor());
-        if (saved != result())
+        if (!oopDesc::equals(saved, result()))
           results->obj_at_put(rfill-1, saved);  // show saved instance to user
       } else if (++overflow >= overflow_limit) {
         match_flags = 0; break; // got tired of looking at overflow
@@ -1013,7 +1024,7 @@
           return -99;  // caller bug!
         CallInfo info(m, NULL, CHECK_0);
         oop saved = MethodHandles::init_method_MemberName(result, info);
-        if (saved != result())
+        if (!oopDesc::equals(saved, result()))
           results->obj_at_put(rfill-1, saved);  // show saved instance to user
       } else if (++overflow >= overflow_limit) {
         match_flags = 0; break; // got tired of looking at overflow
@@ -1186,7 +1197,8 @@
 JVM_END
 
 // void resolve(MemberName self, Class<?> caller)
-JVM_ENTRY(jobject, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) {
+JVM_ENTRY(jobject, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh,
+    jboolean speculative_resolve)) {
   if (mname_jh == NULL) { THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "mname is null"); }
   Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh));
 
@@ -1214,7 +1226,8 @@
 
   Klass* caller = caller_jh == NULL ? NULL :
                      java_lang_Class::as_Klass(JNIHandles::resolve_non_null(caller_jh));
-  Handle resolved = MethodHandles::resolve_MemberName(mname, caller, CHECK_NULL);
+  Handle resolved = MethodHandles::resolve_MemberName(mname, caller, speculative_resolve == JNI_TRUE,
+                                                      CHECK_NULL);
 
   if (resolved.is_null()) {
     int flags = java_lang_invoke_MemberName::flags(mname());
@@ -1222,6 +1235,10 @@
     if (!MethodHandles::ref_kind_is_valid(ref_kind)) {
       THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "obsolete MemberName format");
     }
+    if (speculative_resolve) {
+      assert(!HAS_PENDING_EXCEPTION, "No exceptions expected when resolving speculatively");
+      return NULL;
+    }
     if ((flags & ALL_KINDS) == IS_FIELD) {
       THROW_MSG_NULL(vmSymbols::java_lang_NoSuchFieldError(), "field resolution failed");
     } else if ((flags & ALL_KINDS) == IS_METHOD ||
@@ -1513,7 +1530,7 @@
 static JNINativeMethod MHN_methods[] = {
   {CC "init",                      CC "(" MEM "" OBJ ")V",                   FN_PTR(MHN_init_Mem)},
   {CC "expand",                    CC "(" MEM ")V",                          FN_PTR(MHN_expand_Mem)},
-  {CC "resolve",                   CC "(" MEM "" CLS ")" MEM,                FN_PTR(MHN_resolve_Mem)},
+  {CC "resolve",                   CC "(" MEM "" CLS "Z)" MEM,               FN_PTR(MHN_resolve_Mem)},
   //  static native int getNamedCon(int which, Object[] name)
   {CC "getNamedCon",               CC "(I[" OBJ ")I",                        FN_PTR(MHN_getNamedCon)},
   //  static native int getMembers(Class<?> defc, String matchName, String matchSig,
--- a/src/hotspot/share/prims/methodHandles.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/methodHandles.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -61,7 +61,8 @@
 
  public:
   // working with member names
-  static Handle resolve_MemberName(Handle mname, Klass* caller, TRAPS); // compute vmtarget/vmindex from name/type
+  static Handle resolve_MemberName(Handle mname, Klass* caller,
+                                   bool speculative_resolve, TRAPS); // compute vmtarget/vmindex from name/type
   static void expand_MemberName(Handle mname, int suppress, TRAPS);  // expand defc/name/type if missing
   static oop init_MemberName(Handle mname_h, Handle target_h, TRAPS); // compute vmtarget/vmindex from target
   static oop init_field_MemberName(Handle mname_h, fieldDescriptor& fd, bool is_setter = false);
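
The speculative_resolve flag selects between two failure modes: a speculative probe swallows the pending exception and hands back an empty result, while a normal resolution lets the error propagate to the caller. A minimal sketch of that control flow, using C++ exceptions in place of the TRAPS/PENDING_EXCEPTION machinery:

  #include <stdexcept>
  #include <string>

  // Stand-in for a resolution step that can fail.
  static std::string resolve_or_throw(const std::string& name) {
    if (name.empty()) throw std::runtime_error("unresolvable");
    return "resolved:" + name;
  }

  // Speculative callers get an empty result instead of an error, mirroring
  // how resolve_MemberName clears the pending exception and returns the
  // empty Handle when speculative_resolve is set.
  static std::string resolve(const std::string& name, bool speculative) {
    try {
      return resolve_or_throw(name);
    } catch (const std::runtime_error&) {
      if (speculative) return std::string();  // swallow; caller tests for empty
      throw;                                  // otherwise propagate
    }
  }

  int main() {
    return resolve("", /*speculative=*/ true).empty() ? 0 : 1;  // no throw
  }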
--- a/src/hotspot/share/prims/privilegedStack.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/privilegedStack.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,6 +28,7 @@
 #include "oops/method.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/privilegedStack.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vframe.inline.hpp"
 
 void PrivilegedElement::initialize(vframeStream* vfst, oop context, PrivilegedElement* next, TRAPS) {
--- a/src/hotspot/share/prims/privilegedStack.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/privilegedStack.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_PRIMS_PRIVILEGEDSTACK_HPP
 #define SHARE_VM_PRIMS_PRIVILEGEDSTACK_HPP
 
-#include "memory/allocation.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "runtime/vframe.hpp"
 #include "utilities/growableArray.hpp"
--- a/src/hotspot/share/prims/resolvedMethodTable.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/resolvedMethodTable.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,8 +23,9 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/shared/gcLocker.hpp"
+#include "classfile/javaClasses.hpp"
 #include "memory/allocation.hpp"
+#include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/method.hpp"
@@ -32,6 +33,7 @@
 #include "prims/resolvedMethodTable.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
 
--- a/src/hotspot/share/prims/stackwalk.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/stackwalk.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -48,7 +48,7 @@
 bool BaseFrameStream::check_magic(objArrayHandle frames_array) {
   oop   m1 = frames_array->obj_at(magic_pos);
   jlong m2 = _anchor;
-  if (m1 == _thread->threadObj() && m2 == address_value())  return true;
+  if (oopDesc::equals(m1, _thread->threadObj()) && m2 == address_value())  return true;
   return false;
 }
 
@@ -79,7 +79,7 @@
 {
   assert(thread != NULL && thread->is_Java_thread(), "");
   oop m1 = frames_array->obj_at(magic_pos);
-  if (m1 != thread->threadObj())      return NULL;
+  if (!oopDesc::equals(m1, thread->threadObj())) return NULL;
   if (magic == 0L)                    return NULL;
   BaseFrameStream* stream = (BaseFrameStream*) (intptr_t) magic;
   if (!stream->is_valid_in(thread, frames_array))   return NULL;
--- a/src/hotspot/share/prims/unsafe.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/unsafe.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -123,6 +123,10 @@
   assert_field_offset_sane(p, field_offset);
   jlong byte_offset = field_offset_to_byte_offset(field_offset);
 
+  if (p != NULL) {
+    p = Access<>::resolve(p);
+  }
+
   if (sizeof(char*) == sizeof(jint)) {   // (this constant folds!)
     return (address)p + (jint) byte_offset;
   } else {
@@ -209,7 +213,7 @@
   }
 
   T get() {
-    if (oopDesc::is_null(_obj)) {
+    if (_obj == NULL) {
       GuardUnsafeAccess guard(_thread);
       T ret = RawAccess<>::load(addr());
       return normalize_for_read(ret);
@@ -220,7 +224,7 @@
   }
 
   void put(T x) {
-    if (oopDesc::is_null(_obj)) {
+    if (_obj == NULL) {
       GuardUnsafeAccess guard(_thread);
       RawAccess<>::store(addr(), normalize_for_write(x));
     } else {
@@ -230,7 +234,7 @@
 
 
   T get_volatile() {
-    if (oopDesc::is_null(_obj)) {
+    if (_obj == NULL) {
       GuardUnsafeAccess guard(_thread);
       volatile T ret = RawAccess<MO_SEQ_CST>::load(addr());
       return normalize_for_read(ret);
@@ -241,7 +245,7 @@
   }
 
   void put_volatile(T x) {
-    if (oopDesc::is_null(_obj)) {
+    if (_obj == NULL) {
       GuardUnsafeAccess guard(_thread);
       RawAccess<MO_SEQ_CST>::store(addr(), normalize_for_write(x));
     } else {
@@ -871,7 +875,7 @@
 
 UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
   oop p = JNIHandles::resolve(obj);
-  if (oopDesc::is_null(p)) {
+  if (p == NULL) {
     volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
     return RawAccess<>::atomic_cmpxchg(x, addr, e);
   } else {
@@ -882,7 +886,7 @@
 
 UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
   oop p = JNIHandles::resolve(obj);
-  if (oopDesc::is_null(p)) {
+  if (p == NULL) {
     volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
     return RawAccess<>::atomic_cmpxchg(x, addr, e);
   } else {
@@ -897,12 +901,12 @@
   oop p = JNIHandles::resolve(obj);
   assert_field_offset_sane(p, offset);
   oop ret = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
-  return ret == e;
+  return oopDesc::equals(ret, e);
 } UNSAFE_END
 
 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
   oop p = JNIHandles::resolve(obj);
-  if (oopDesc::is_null(p)) {
+  if (p == NULL) {
     volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
     return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
   } else {
@@ -913,7 +917,7 @@
 
 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
   oop p = JNIHandles::resolve(obj);
-  if (oopDesc::is_null(p)) {
+  if (p == NULL) {
     volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
     return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
   } else {
--- a/src/hotspot/share/prims/whitebox.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/prims/whitebox.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -32,6 +32,8 @@
 #include "code/codeCache.hpp"
 #include "compiler/methodMatcher.hpp"
 #include "compiler/directivesParser.hpp"
+#include "gc/shared/gcConfig.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
@@ -61,6 +63,7 @@
 #include "runtime/thread.hpp"
 #include "runtime/threadSMR.hpp"
 #include "runtime/vm_version.hpp"
+#include "services/memoryService.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/elfFile.hpp"
@@ -70,9 +73,9 @@
 #include "prims/cdsoffsets.hpp"
 #endif // INCLUDE_CDS
 #if INCLUDE_ALL_GCS
-#include "gc/g1/concurrentMarkThread.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
 #include "gc/parallel/adjoiningGenerations.hpp"
@@ -310,47 +313,16 @@
                                         (size_t) magnitude, (size_t) iterations);
 WB_END
 
-static const jint serial_code   = 1;
-static const jint parallel_code = 2;
-static const jint cms_code      = 4;
-static const jint g1_code       = 8;
-
-WB_ENTRY(jint, WB_CurrentGC(JNIEnv* env, jobject o, jobject obj))
-  if (UseSerialGC) {
-    return serial_code;
-  } else if (UseParallelGC || UseParallelOldGC) {
-    return parallel_code;
-  } if (UseConcMarkSweepGC) {
-    return cms_code;
-  } else if (UseG1GC) {
-    return g1_code;
-  }
-  ShouldNotReachHere();
-  return 0;
+WB_ENTRY(jboolean, WB_IsGCSupported(JNIEnv* env, jobject o, jint name))
+  return GCConfig::is_gc_supported((CollectedHeap::Name)name);
 WB_END
 
-WB_ENTRY(jint, WB_AllSupportedGC(JNIEnv* env, jobject o, jobject obj))
-#if INCLUDE_ALL_GCS
-  return serial_code | parallel_code | cms_code | g1_code;
-#else
-  return serial_code;
-#endif // INCLUDE_ALL_GCS
+WB_ENTRY(jboolean, WB_IsGCSelected(JNIEnv* env, jobject o, jint name))
+  return GCConfig::is_gc_selected((CollectedHeap::Name)name);
 WB_END
 
-WB_ENTRY(jboolean, WB_GCSelectedByErgo(JNIEnv* env, jobject o, jobject obj))
-  if (UseSerialGC) {
-    return FLAG_IS_ERGO(UseSerialGC);
-  } else if (UseParallelGC) {
-    return FLAG_IS_ERGO(UseParallelGC);
-  } else if (UseParallelOldGC) {
-    return FLAG_IS_ERGO(UseParallelOldGC);
-  } else if (UseConcMarkSweepGC) {
-    return FLAG_IS_ERGO(UseConcMarkSweepGC);
-  } else if (UseG1GC) {
-    return FLAG_IS_ERGO(UseG1GC);
-  }
-  ShouldNotReachHere();
-  return false;
+WB_ENTRY(jboolean, WB_IsGCSelectedErgonomically(JNIEnv* env, jobject o))
+  return GCConfig::is_gc_selected_ergonomically();
 WB_END
 
 WB_ENTRY(jboolean, WB_isObjectInOldGen(JNIEnv* env, jobject o, jobject obj))
@@ -2160,10 +2132,10 @@
   {CC"handshakeWalkStack", CC"(Ljava/lang/Thread;Z)I", (void*)&WB_HandshakeWalkStack },
   {CC"addCompilerDirective",    CC"(Ljava/lang/String;)I",
                                                       (void*)&WB_AddCompilerDirective },
-  {CC"removeCompilerDirective",   CC"(I)V",             (void*)&WB_RemoveCompilerDirective },
-  {CC"currentGC",                 CC"()I",            (void*)&WB_CurrentGC},
-  {CC"allSupportedGC",            CC"()I",            (void*)&WB_AllSupportedGC},
-  {CC"gcSelectedByErgo",          CC"()Z",            (void*)&WB_GCSelectedByErgo},
+  {CC"removeCompilerDirective",   CC"(I)V",           (void*)&WB_RemoveCompilerDirective },
+  {CC"isGCSupported",             CC"(I)Z",           (void*)&WB_IsGCSupported},
+  {CC"isGCSelected",              CC"(I)Z",           (void*)&WB_IsGCSelected},
+  {CC"isGCSelectedErgonomically", CC"()Z",            (void*)&WB_IsGCSelectedErgonomically},
   {CC"supportsConcurrentGCPhaseControl", CC"()Z",     (void*)&WB_SupportsConcurrentGCPhaseControl},
   {CC"getConcurrentGCPhases",     CC"()[Ljava/lang/String;",
                                                       (void*)&WB_GetConcurrentGCPhases},
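
For orientation, a sketch of how the three new WhiteBox entry points are queried on the VM side; the jint arriving over JNI is cast to a CollectedHeap::Name constant (the Serial/Parallel/CMS/G1 values renamed in the vmStructs.cpp hunk below). The concrete values here are illustrative:

  // Illustrative only: probing collector support and selection via GCConfig.
  CollectedHeap::Name name = CollectedHeap::G1;
  bool supported = GCConfig::is_gc_supported(name);          // built into this VM?
  bool selected  = GCConfig::is_gc_selected(name);           // active for this run?
  bool by_ergo   = GCConfig::is_gc_selected_ergonomically(); // picked by ergonomics, not by flag
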
--- a/src/hotspot/share/runtime/arguments.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/arguments.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -30,6 +30,7 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
 #include "gc/shared/gcArguments.hpp"
+#include "gc/shared/gcConfig.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/taskqueue.hpp"
@@ -49,7 +50,7 @@
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/vm_version.hpp"
@@ -511,7 +512,6 @@
   { "InitialRAMFraction",           JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
   { "UseMembar",                    JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
   { "IgnoreUnverifiableClassesDuringDump", JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
-  { "CheckEndorsedAndExtDirs",      JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
   { "CompilerThreadHintNoPreempt",  JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
   { "VMThreadHintNoPreempt",        JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
   { "PrintSafepointStatistics",     JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
@@ -535,6 +535,7 @@
   { "ShowSafepointMsgs",             JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "FastTLABRefill",                JDK_Version::jdk(10),     JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "SafepointSpinBeforeYield",      JDK_Version::jdk(10),     JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "CheckEndorsedAndExtDirs",       JDK_Version::jdk(10),     JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "DeferThrSuspendLoopCount",      JDK_Version::jdk(10),     JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "DeferPollingPageLoopCount",     JDK_Version::jdk(10),     JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "PermSize",                      JDK_Version::undefined(), JDK_Version::jdk(8),  JDK_Version::undefined() },
@@ -1749,7 +1750,7 @@
   // the alignments imposed by several sources: any requirements from the heap
   // itself, the collector policy and the maximum page size we may run the VM
   // with.
-  size_t heap_alignment = GCArguments::arguments()->conservative_max_heap_alignment();
+  size_t heap_alignment = GCConfig::arguments()->conservative_max_heap_alignment();
   _conservative_max_heap_alignment = MAX4(heap_alignment,
                                           (size_t)os::vm_allocation_granularity(),
                                           os::max_page_size(),
@@ -1815,10 +1816,7 @@
   }
 #endif
 
-  jint gc_result = GCArguments::initialize();
-  if (gc_result != JNI_OK) {
-    return gc_result;
-  }
+  GCConfig::initialize();
 
 #if COMPILER2_OR_JVMCI
   // Shared spaces work fine with other GCs but causes bytecode rewriting
@@ -2176,26 +2174,6 @@
 }
 #endif //INCLUDE_JVMCI
 
-// Check consistency of GC selection
-bool Arguments::check_gc_consistency() {
-  // Ensure that the user has not selected conflicting sets
-  // of collectors.
-  uint i = 0;
-  if (UseSerialGC)                       i++;
-  if (UseConcMarkSweepGC)                i++;
-  if (UseParallelGC || UseParallelOldGC) i++;
-  if (UseG1GC)                           i++;
-  if (i > 1) {
-    jio_fprintf(defaultStream::error_stream(),
-                "Conflicting collector combinations in option list; "
-                "please refer to the release notes for the combinations "
-                "allowed\n");
-    return false;
-  }
-
-  return true;
-}
-
 // Check the consistency of vm_init_args
 bool Arguments::check_vm_args_consistency() {
   // Method for adding checks for flag consistency.
@@ -2225,8 +2203,6 @@
     FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
   }
 
-  status = status && check_gc_consistency();
-
   // CMS space iteration, which FLSVerifyAllHeapreferences entails,
   // insists that we hold the requisite locks so that the iteration is
   // MT-safe. For the verification at start-up and shut-down, we don't
@@ -3326,69 +3302,12 @@
   }
 }
 
-static bool has_jar_files(const char* directory) {
-  DIR* dir = os::opendir(directory);
-  if (dir == NULL) return false;
-
-  struct dirent *entry;
-  char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtArguments);
-  bool hasJarFile = false;
-  while (!hasJarFile && (entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
-    const char* name = entry->d_name;
-    const char* ext = name + strlen(name) - 4;
-    hasJarFile = ext > name && (os::file_name_strcmp(ext, ".jar") == 0);
-  }
-  FREE_C_HEAP_ARRAY(char, dbuf);
-  os::closedir(dir);
-  return hasJarFile ;
-}
-
-static int check_non_empty_dirs(const char* path) {
-  const char separator = *os::path_separator();
-  const char* const end = path + strlen(path);
-  int nonEmptyDirs = 0;
-  while (path < end) {
-    const char* tmp_end = strchr(path, separator);
-    if (tmp_end == NULL) {
-      if (has_jar_files(path)) {
-        nonEmptyDirs++;
-        jio_fprintf(defaultStream::output_stream(),
-          "Non-empty directory: %s\n", path);
-      }
-      path = end;
-    } else {
-      char* dirpath = NEW_C_HEAP_ARRAY(char, tmp_end - path + 1, mtArguments);
-      memcpy(dirpath, path, tmp_end - path);
-      dirpath[tmp_end - path] = '\0';
-      if (has_jar_files(dirpath)) {
-        nonEmptyDirs++;
-        jio_fprintf(defaultStream::output_stream(),
-          "Non-empty directory: %s\n", dirpath);
-      }
-      FREE_C_HEAP_ARRAY(char, dirpath);
-      path = tmp_end + 1;
-    }
-  }
-  return nonEmptyDirs;
-}
-
 jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) {
   // check if the default lib/endorsed directory exists; if so, error
   char path[JVM_MAXPATHLEN];
   const char* fileSep = os::file_separator();
   jio_snprintf(path, JVM_MAXPATHLEN, "%s%slib%sendorsed", Arguments::get_java_home(), fileSep, fileSep);
 
-  if (CheckEndorsedAndExtDirs) {
-    int nonEmptyDirs = 0;
-    // check endorsed directory
-    nonEmptyDirs += check_non_empty_dirs(path);
-    // check the extension directories
-    nonEmptyDirs += check_non_empty_dirs(Arguments::get_ext_dirs());
-    if (nonEmptyDirs > 0) {
-      return JNI_ERR;
-    }
-  }
-
   DIR* dir = os::opendir(path);
   if (dir != NULL) {
     jio_fprintf(defaultStream::output_stream(),
@@ -3493,6 +3412,10 @@
   }
 #endif
 
+#ifndef CAN_SHOW_REGISTERS_ON_ASSERT
+  UNSUPPORTED_OPTION(ShowRegistersOnAssert);
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
   return JNI_OK;
 }
 
@@ -4241,11 +4164,6 @@
 
   set_shared_spaces_flags();
 
-  // Check the GC selections again.
-  if (!check_gc_consistency()) {
-    return JNI_EINVAL;
-  }
-
   if (TieredCompilation) {
     set_tiered_flags();
   } else {
@@ -4278,7 +4196,7 @@
   // Set heap size based on available physical memory
   set_heap_size();
 
-  GCArguments::arguments()->initialize_flags();
+  GCConfig::arguments()->initialize();
 
   // Initialize Metaspace flags and alignments
   Metaspace::ergo_initialize();
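
Taken together, these arguments.cpp hunks replace the removed check_gc_consistency() and the direct GCArguments calls with a single GCConfig pipeline. A rough sketch of the resulting order of operations, as implied by the hunks above rather than quoted from the file:

  // Rough sketch of the new GC argument flow.
  GCConfig::initialize();  // selects exactly one collector; conflicting
                           // -XX:+Use*GC combinations now fail here
  size_t heap_alignment =
      GCConfig::arguments()->conservative_max_heap_alignment();
  // ... later, after set_heap_size():
  GCConfig::arguments()->initialize();  // per-collector flag initialization
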
--- a/src/hotspot/share/runtime/biasedLocking.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/biasedLocking.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -254,7 +254,7 @@
   BasicLock* highest_lock = NULL;
   for (int i = 0; i < cached_monitor_info->length(); i++) {
     MonitorInfo* mon_info = cached_monitor_info->at(i);
-    if (mon_info->owner() == obj) {
+    if (oopDesc::equals(mon_info->owner(), obj)) {
       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                                p2i((void *) mon_info->owner()),
                                p2i((void *) obj));
--- a/src/hotspot/share/runtime/deoptimization.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/deoptimization.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -48,6 +48,7 @@
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubRoutines.hpp"
--- a/src/hotspot/share/runtime/extendedPC.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/extendedPC.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_RUNTIME_EXTENDEDPC_HPP
 #define SHARE_VM_RUNTIME_EXTENDEDPC_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 // An ExtendedPC contains the _pc from a signal handler in a platform
--- a/src/hotspot/share/runtime/globals.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/globals.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1241,9 +1241,6 @@
   product(bool, CheckJNICalls, false,                                       \
           "Verify all arguments to JNI calls")                              \
                                                                             \
-  product(bool, CheckEndorsedAndExtDirs, false,                             \
-          "Verify the endorsed and extension directories are not used")     \
-                                                                            \
   product(bool, UseFastJNIAccessors, true,                                  \
           "Use optimized versions of Get<Primitive>Field")                  \
                                                                             \
@@ -4065,6 +4062,9 @@
   develop(bool, VerifyMetaspace, false,                                     \
           "Verify metaspace on chunk movements.")                           \
                                                                             \
+  diagnostic(bool, ShowRegistersOnAssert, false,                            \
+          "On internal errors, include registers in error report.")         \
+                                                                            \
 
 
 
--- a/src/hotspot/share/runtime/handles.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/handles.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -77,8 +77,9 @@
   // General access
   oop     operator () () const                   { return obj(); }
   oop     operator -> () const                   { return non_null_obj(); }
-  bool    operator == (oop o) const              { return obj() == o; }
-  bool    operator == (const Handle& h) const          { return obj() == h.obj(); }
+
+  bool operator == (oop o) const                 { return oopDesc::equals(obj(), o); }
+  bool operator == (const Handle& h) const       { return oopDesc::equals(obj(), h.obj()); }
 
   // Null checks
   bool    is_null() const                        { return _handle == NULL; }
--- a/src/hotspot/share/runtime/interfaceSupport.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/interfaceSupport.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -35,6 +35,7 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/thread.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/preserveException.hpp"
@@ -298,3 +299,40 @@
   }
 #endif
 }
+
+#ifdef ASSERT
+// JRT_LEAF rules:
+// A JRT_LEAF method may not interfere with safepointing by
+//   1) acquiring or blocking on a Mutex or JavaLock - checked
+//   2) allocating heap memory - checked
+//   3) executing a VM operation - checked
+//   4) executing a system call (including malloc) that could block or grab a lock
+//   5) invoking GC
+//   6) reaching a safepoint
+//   7) running too long
+// Nor may any method it calls.
+JRTLeafVerifier::JRTLeafVerifier()
+  : NoSafepointVerifier(true, JRTLeafVerifier::should_verify_GC())
+{
+}
+
+JRTLeafVerifier::~JRTLeafVerifier()
+{
+}
+
+bool JRTLeafVerifier::should_verify_GC() {
+  switch (JavaThread::current()->thread_state()) {
+  case _thread_in_Java:
+    // The thread is in a leaf routine; there must be no safepoint.
+    return true;
+  case _thread_in_native:
+    // A native thread is not subject to safepoints.
+    // Even while it is in a leaf routine, GC is OK.
+    return false;
+  default:
+    // Leaf routines cannot be called from other contexts.
+    ShouldNotReachHere();
+    return false;
+  }
+}
+#endif // ASSERT
--- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,12 +25,12 @@
 #ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP
 #define SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP
 
-#include "gc/shared/gcLocker.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointMechanism.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -356,6 +356,24 @@
 
 // VM-internal runtime interface support
 
+// Definitions for JRT (Java (Compiler/Shared) Runtime)
+
+// JRT_LEAF currently can be called from either _thread_in_Java or
+// _thread_in_native mode. In _thread_in_native, it is ok
+// for another thread to trigger GC. The rest of the JRT_LEAF
+// rules apply.
+class JRTLeafVerifier : public NoSafepointVerifier {
+  static bool should_verify_GC();
+ public:
+#ifdef ASSERT
+  JRTLeafVerifier();
+  ~JRTLeafVerifier();
+#else
+  JRTLeafVerifier() {}
+  ~JRTLeafVerifier() {}
+#endif
+};
+
 #ifdef ASSERT
 
 class RuntimeHistogramElement : public HistogramElement {
@@ -436,9 +454,6 @@
 
 #define IRT_END }
 
-
-// Definitions for JRT (Java (Compiler/Shared) Runtime)
-
 #define JRT_ENTRY(result_type, header)                               \
   result_type header {                                               \
     ThreadInVMfromJava __tiv(thread);                                \
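
To make the JRT_LEAF rules above concrete: in debug builds the macro plants a JRTLeafVerifier on the stack, so a safepoint or heap allocation inside the leaf body asserts immediately. A sketch of that relationship; the exact macro body may differ:

  // Sketch: how JRT_LEAF presumably plants the verifier.
  #define JRT_LEAF(result_type, header)              \
    result_type header {                             \
      VM_LEAF_BASE(result_type, header)              \
      debug_only(JRTLeafVerifier __jlv;)

JRT_END then closes the scope, so the verifier covers the whole leaf body.
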
--- a/src/hotspot/share/runtime/java.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/java.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -316,8 +316,13 @@
     CodeCache::print();
   }
 
-  if (PrintMethodFlushingStatistics) {
-    NMethodSweeper::print();
+  // CodeHeap State Analytics.
+  // Also calls NMethodSweeper::print(tty).
+  LogTarget(Trace, codecache) lt;
+  if (lt.is_enabled()) {
+    CompileBroker::print_heapinfo(NULL, "all", "4096"); // details
+  } else if (PrintMethodFlushingStatistics) {
+    NMethodSweeper::print(tty);
   }
 
   if (PrintCodeCache2) {
@@ -379,8 +384,13 @@
     CodeCache::print();
   }
 
-  if (PrintMethodFlushingStatistics) {
-    NMethodSweeper::print();
+  // CodeHeap State Analytics.
+  // Also calls NMethodSweeper::print(tty).
+  LogTarget(Trace, codecache) lt;
+  if (lt.is_enabled()) {
+    CompileBroker::print_heapinfo(NULL, "all", "4096"); // details
+  } else if (PrintMethodFlushingStatistics) {
+    NMethodSweeper::print(tty);
   }
 
 #ifdef COMPILER2
--- a/src/hotspot/share/runtime/javaCalls.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/javaCalls.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -40,6 +40,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/os.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
--- a/src/hotspot/share/runtime/jniHandles.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/jniHandles.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,6 +26,7 @@
 #include "gc/shared/oopStorage.inline.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
@@ -34,9 +35,6 @@
 #include "trace/traceMacros.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif
 
 OopStorage* JNIHandles::_global_handles = NULL;
 OopStorage* JNIHandles::_weak_global_handles = NULL;
@@ -101,7 +99,8 @@
     oop* ptr = _global_handles->allocate();
     // Return NULL on allocation failure.
     if (ptr != NULL) {
-      *ptr = obj();
+      assert(*ptr == NULL, "invariant");
+      RootAccess<IN_CONCURRENT_ROOT>::oop_store(ptr, obj());
       res = reinterpret_cast<jobject>(ptr);
     } else {
       report_handle_allocation_failure(alloc_failmode, "global");
@@ -124,7 +123,8 @@
     oop* ptr = _weak_global_handles->allocate();
     // Return NULL on allocation failure.
     if (ptr != NULL) {
-      *ptr = obj();
+      assert(*ptr == NULL, "invariant");
+      RootAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
       char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
       res = reinterpret_cast<jobject>(tptr);
     } else {
@@ -151,26 +151,23 @@
 oop JNIHandles::resolve_jweak(jweak handle) {
   assert(handle != NULL, "precondition");
   assert(is_jweak(handle), "precondition");
-  oop result = jweak_ref(handle);
-#if INCLUDE_ALL_GCS
-  if (result != NULL && UseG1GC) {
-    G1BarrierSet::enqueue(result);
-  }
-#endif // INCLUDE_ALL_GCS
-  return result;
+  return RootAccess<ON_PHANTOM_OOP_REF>::oop_load(jweak_ptr(handle));
 }
 
 bool JNIHandles::is_global_weak_cleared(jweak handle) {
   assert(handle != NULL, "precondition");
   assert(is_jweak(handle), "not a weak handle");
-  return jweak_ref(handle) == NULL;
+  oop* oop_ptr = jweak_ptr(handle);
+  oop value = RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
+  return value == NULL;
 }
 
 void JNIHandles::destroy_global(jobject handle) {
   if (handle != NULL) {
     assert(!is_jweak(handle), "wrong method for detroying jweak");
-    jobject_ref(handle) = NULL;
-    _global_handles->release(&jobject_ref(handle));
+    oop* oop_ptr = jobject_ptr(handle);
+    RootAccess<IN_CONCURRENT_ROOT>::oop_store(oop_ptr, (oop)NULL);
+    _global_handles->release(oop_ptr);
   }
 }
 
@@ -178,8 +175,9 @@
 void JNIHandles::destroy_weak_global(jobject handle) {
   if (handle != NULL) {
     assert(is_jweak(handle), "JNI handle not jweak");
-    jweak_ref(handle) = NULL;
-    _weak_global_handles->release(&jweak_ref(handle));
+    oop* oop_ptr = jweak_ptr(handle);
+    RootAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)NULL);
+    _weak_global_handles->release(oop_ptr);
   }
 }
 
@@ -218,11 +216,11 @@
   assert(handle != NULL, "precondition");
   jobjectRefType result = JNIInvalidRefType;
   if (is_jweak(handle)) {
-    if (is_storage_handle(_weak_global_handles, &jweak_ref(handle))) {
+    if (is_storage_handle(_weak_global_handles, jweak_ptr(handle))) {
       result = JNIWeakGlobalRefType;
     }
   } else {
-    switch (_global_handles->allocation_status(&jobject_ref(handle))) {
+    switch (_global_handles->allocation_status(jobject_ptr(handle))) {
     case OopStorage::ALLOCATED_ENTRY:
       result = JNIGlobalRefType;
       break;
@@ -279,13 +277,13 @@
 
 bool JNIHandles::is_global_handle(jobject handle) {
   assert(handle != NULL, "precondition");
-  return !is_jweak(handle) && is_storage_handle(_global_handles, &jobject_ref(handle));
+  return !is_jweak(handle) && is_storage_handle(_global_handles, jobject_ptr(handle));
 }
 
 
 bool JNIHandles::is_weak_global_handle(jobject handle) {
   assert(handle != NULL, "precondition");
-  return is_jweak(handle) && is_storage_handle(_weak_global_handles, &jweak_ref(handle));
+  return is_jweak(handle) && is_storage_handle(_weak_global_handles, jweak_ptr(handle));
 }
 
 size_t JNIHandles::global_handle_memory_usage() {
@@ -351,6 +349,8 @@
   // Zap block values
   _top = 0;
   for (int index = 0; index < block_size_in_oops; index++) {
+    // NOT using Access here; just bare clobbering to NULL, since the
+    // block no longer contains valid oops.
     _handles[index] = NULL;
   }
 }
@@ -506,7 +506,7 @@
   // Try last block
   if (_last->_top < block_size_in_oops) {
     oop* handle = &(_last->_handles)[_last->_top++];
-    *handle = obj;
+    RootAccess<AS_DEST_NOT_INITIALIZED>::oop_store(handle, obj);
     return (jobject) handle;
   }
 
@@ -514,7 +514,7 @@
   if (_free_list != NULL) {
     oop* handle = _free_list;
     _free_list = (oop*) *_free_list;
-    *handle = obj;
+    RootAccess<AS_DEST_NOT_INITIALIZED>::oop_store(handle, obj);
     return (jobject) handle;
   }
   // Check if unused block follow last
--- a/src/hotspot/share/runtime/jniHandles.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/jniHandles.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,10 +28,8 @@
 #include "memory/allocation.hpp"
 #include "runtime/handles.hpp"
 
-class JNIHandleBlock;
 class OopStorage;
 
-
 // Interface for creating and resolving local/global JNI handles
 
 class JNIHandles : AllStatic {
@@ -41,8 +39,8 @@
   static OopStorage* _weak_global_handles;
 
   inline static bool is_jweak(jobject handle);
-  inline static oop& jobject_ref(jobject handle); // NOT jweak!
-  inline static oop& jweak_ref(jobject handle);
+  inline static oop* jobject_ptr(jobject handle); // NOT jweak!
+  inline static oop* jweak_ptr(jobject handle);
 
   template<bool external_guard> inline static oop resolve_impl(jobject handle);
   static oop resolve_jweak(jweak handle);
--- a/src/hotspot/share/runtime/jniHandles.inline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/jniHandles.inline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_RUNTIME_JNIHANDLES_INLINE_HPP
 #define SHARE_RUNTIME_JNIHANDLES_INLINE_HPP
 
+#include "oops/access.inline.hpp"
 #include "oops/oop.hpp"
 #include "runtime/jniHandles.hpp"
 #include "utilities/debug.hpp"
@@ -36,15 +37,15 @@
   return (reinterpret_cast<uintptr_t>(handle) & weak_tag_mask) != 0;
 }
 
-inline oop& JNIHandles::jobject_ref(jobject handle) {
+inline oop* JNIHandles::jobject_ptr(jobject handle) {
   assert(!is_jweak(handle), "precondition");
-  return *reinterpret_cast<oop*>(handle);
+  return reinterpret_cast<oop*>(handle);
 }
 
-inline oop& JNIHandles::jweak_ref(jobject handle) {
+inline oop* JNIHandles::jweak_ptr(jobject handle) {
   assert(is_jweak(handle), "precondition");
   char* ptr = reinterpret_cast<char*>(handle) - weak_tag_value;
-  return *reinterpret_cast<oop*>(ptr);
+  return reinterpret_cast<oop*>(ptr);
 }
 
 // external_guard is true if called from resolve_external_guard.
@@ -56,7 +57,7 @@
   if (is_jweak(handle)) {       // Unlikely
     result = resolve_jweak(handle);
   } else {
-    result = jobject_ref(handle);
+    result = RootAccess<IN_CONCURRENT_ROOT>::oop_load(jobject_ptr(handle));
     // Construction of jobjects canonicalizes a null value into a null
     // jobject, so for non-jweak the pointee should never be null.
     assert(external_guard || result != NULL, "Invalid JNI handle");
@@ -82,7 +83,7 @@
 inline void JNIHandles::destroy_local(jobject handle) {
   if (handle != NULL) {
     assert(!is_jweak(handle), "Invalid JNI local handle");
-    jobject_ref(handle) = NULL;
+    RootAccess<>::oop_store(jobject_ptr(handle), (oop)NULL);
   }
 }
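
The common pattern across the jniHandles changes: every load or store of a handle slot now names its root category through an Access decorator, so the GC can attach the matching barrier. In summary (these calls all appear in the hunks above; the comments are interpretation, not normative):

  // jweak slot: phantom-strength load that keeps the referent alive ...
  oop o = RootAccess<ON_PHANTOM_OOP_REF>::oop_load(jweak_ptr(handle));
  // ... or peek without keeping it alive (the is-cleared check):
  oop p = RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(jweak_ptr(handle));
  // global slot: store into a concurrently scanned root:
  RootAccess<IN_CONCURRENT_ROOT>::oop_store(jobject_ptr(handle), obj());
  // local slot: plain root store, default decorators:
  RootAccess<>::oop_store(jobject_ptr(handle), (oop)NULL);
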
 
--- a/src/hotspot/share/runtime/mutexLocker.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/mutexLocker.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -116,7 +116,6 @@
 Mutex*   OopMapCacheAlloc_lock        = NULL;
 
 Mutex*   FreeList_lock                = NULL;
-Monitor* SecondaryFreeList_lock       = NULL;
 Mutex*   OldSets_lock                 = NULL;
 Monitor* RootRegionScan_lock          = NULL;
 
@@ -137,6 +136,9 @@
 #ifndef SUPPORTS_NATIVE_CX8
 Mutex*   UnsafeJlong_lock             = NULL;
 #endif
+Monitor* CodeHeapStateAnalytics_lock  = NULL;
+
+Mutex*   MetaspaceExpand_lock         = NULL;
 
 #define MAX_NUM_MUTEX 128
 static Monitor * _mutex_array[MAX_NUM_MUTEX];
@@ -191,7 +193,6 @@
     def(Shared_DirtyCardQ_lock     , PaddedMutex  , access + 1,  true,  Monitor::_safepoint_check_never);
 
     def(FreeList_lock              , PaddedMutex  , leaf     ,   true,  Monitor::_safepoint_check_never);
-    def(SecondaryFreeList_lock     , PaddedMonitor, leaf     ,   true,  Monitor::_safepoint_check_never);
     def(OldSets_lock               , PaddedMutex  , leaf     ,   true,  Monitor::_safepoint_check_never);
     def(RootRegionScan_lock        , PaddedMonitor, leaf     ,   true,  Monitor::_safepoint_check_never);
 
@@ -210,6 +211,8 @@
   def(RawMonitor_lock              , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);
   def(OopMapCacheAlloc_lock        , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);     // used for oop_map_cache allocation.
 
+  def(MetaspaceExpand_lock         , PaddedMutex  , leaf-1,      true,  Monitor::_safepoint_check_never);
+
   def(Patching_lock                , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);      // used for safepointing and code patching.
   def(Service_lock                 , PaddedMonitor, special,     true,  Monitor::_safepoint_check_never);      // used for service thread operations
   def(JmethodIdCreation_lock       , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);     // used for creating jmethodIDs.
@@ -297,6 +300,8 @@
 #ifndef SUPPORTS_NATIVE_CX8
   def(UnsafeJlong_lock             , PaddedMutex  , special,     false, Monitor::_safepoint_check_never);
 #endif
+
+  def(CodeHeapStateAnalytics_lock  , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
 }
 
 GCMutexLocker::GCMutexLocker(Monitor * mutex) {
--- a/src/hotspot/share/runtime/mutexLocker.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/mutexLocker.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -117,7 +117,6 @@
 extern Mutex*   OopMapCacheAlloc_lock;           // protects allocation of oop_map caches
 
 extern Mutex*   FreeList_lock;                   // protects the free region list during safepoints
-extern Monitor* SecondaryFreeList_lock;          // protects the secondary free region list
 extern Mutex*   OldSets_lock;                    // protects the old region sets
 extern Monitor* RootRegionScan_lock;             // used to notify that the CM threads have finished scanning the IM snapshot regions
 
@@ -137,6 +136,12 @@
 extern Mutex*   UnsafeJlong_lock;                // provides Unsafe atomic updates to jlongs on platforms that don't support cx8
 #endif
 
+extern Mutex*   MetaspaceExpand_lock;            // protects Metaspace virtualspace and chunk expansions
+
+
+extern Monitor* CodeHeapStateAnalytics_lock;     // lock print functions against concurrent analyze functions.
+                                                 // Only used locally in PrintCodeCacheLayout processing.
+
 // A MutexLocker provides mutual exclusion with respect to a given mutex
 // for the scope which contains the locker.  The lock is an OS lock, not
 // an object lock, and the two do not interoperate.  Do not use Mutex-based
--- a/src/hotspot/share/runtime/objectMonitor.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/objectMonitor.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -37,6 +37,7 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/safepointMechanism.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "services/threadService.hpp"
--- a/src/hotspot/share/runtime/os.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/os.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -53,6 +53,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/os.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
@@ -1157,32 +1158,10 @@
   st->print_cr(INTPTR_FORMAT " is an unknown value", p2i(addr));
 }
 
-// Looks like all platforms except IA64 can use the same function to check
-// if C stack is walkable beyond current frame. The check for fp() is not
+// Looks like all platforms can use the same function to check if the C
+// stack is walkable beyond the current frame. The check for fp() is not
 // necessary on Sparc, but it's harmless.
 bool os::is_first_C_frame(frame* fr) {
-#if (defined(IA64) && !defined(AIX)) && !defined(_WIN32)
-  // On IA64 we have to check if the callers bsp is still valid
-  // (i.e. within the register stack bounds).
-  // Notice: this only works for threads created by the VM and only if
-  // we walk the current stack!!! If we want to be able to walk
-  // arbitrary other threads, we'll have to somehow store the thread
-  // object in the frame.
-  Thread *thread = Thread::current();
-  if ((address)fr->fp() <=
-      thread->register_stack_base() HPUX_ONLY(+ 0x0) LINUX_ONLY(+ 0x50)) {
-    // This check is a little hacky, because on Linux the first C
-    // frame's ('start_thread') register stack frame starts at
-    // "register_stack_base + 0x48" while on HPUX, the first C frame's
-    // ('__pthread_bound_body') register stack frame seems to really
-    // start at "register_stack_base".
-    return true;
-  } else {
-    return false;
-  }
-#elif defined(IA64) && defined(_WIN32)
-  return true;
-#else
   // Load up sp, fp, sender sp and sender fp, check for reasonable values.
   // Check usp first, because if that's bad the other accessors may fault
   // on some architectures.  Ditto ufp second, etc.
@@ -1212,7 +1191,6 @@
   if (old_fp - ufp > 64 * K) return true;
 
   return false;
-#endif
 }
 
 
--- a/src/hotspot/share/runtime/reflection.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/reflection.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -418,7 +418,7 @@
     assert(lower_dim->is_array_klass(), "just checking");
     result2 = lower_dim->java_mirror();
   }
-  assert(result == result2, "results must be consistent");
+  assert(oopDesc::equals(result, result2), "results must be consistent");
 #endif //ASSERT
   return result;
 }
--- a/src/hotspot/share/runtime/safepoint.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/safepoint.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -33,7 +33,7 @@
 #include "code/pcDesc.hpp"
 #include "code/scopeDesc.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "interpreter/interpreter.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/safepointVerifiers.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/safepointVerifiers.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "memory/universe.hpp"
+#include "utilities/debug.hpp"
+
+// Implementation of NoGCVerifier
+
+#ifdef ASSERT
+
+NoGCVerifier::NoGCVerifier(bool verifygc) {
+  _verifygc = verifygc;
+  if (_verifygc) {
+    CollectedHeap* h = Universe::heap();
+    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
+    _old_invocations = h->total_collections();
+  }
+}
+
+
+NoGCVerifier::~NoGCVerifier() {
+  if (_verifygc) {
+    CollectedHeap* h = Universe::heap();
+    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
+    if (_old_invocations != h->total_collections()) {
+      fatal("collection in a NoGCVerifier secured function");
+    }
+  }
+}
+
+PauseNoGCVerifier::PauseNoGCVerifier(NoGCVerifier * ngcv) {
+  _ngcv = ngcv;
+  if (_ngcv->_verifygc) {
+    // if we were verifying, then make sure that nothing is
+    // wrong before we "pause" verification
+    CollectedHeap* h = Universe::heap();
+    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
+    if (_ngcv->_old_invocations != h->total_collections()) {
+      fatal("collection in a NoGCVerifier secured function");
+    }
+  }
+}
+
+
+PauseNoGCVerifier::~PauseNoGCVerifier() {
+  if (_ngcv->_verifygc) {
+    // if we were verifying before, then reenable verification
+    CollectedHeap* h = Universe::heap();
+    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
+    _ngcv->_old_invocations = h->total_collections();
+  }
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/safepointVerifiers.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_SAFEPOINTVERIFIERS_HPP
+#define SHARE_VM_RUNTIME_SAFEPOINTVERIFIERS_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/thread.hpp"
+
+// A NoGCVerifier object can be placed in methods where one assumes that
+// no garbage collection will occur. The destructor will verify this property
+// unless the constructor is called with argument false (not verifygc).
+//
+// The check will only be done in debug mode and if verifygc true.
+
+class NoGCVerifier: public StackObj {
+ friend class PauseNoGCVerifier;
+
+ protected:
+  bool _verifygc;
+  unsigned int _old_invocations;
+
+ public:
+#ifdef ASSERT
+  NoGCVerifier(bool verifygc = true);
+  ~NoGCVerifier();
+#else
+  NoGCVerifier(bool verifygc = true) {}
+  ~NoGCVerifier() {}
+#endif
+};
+
+// A PauseNoGCVerifier is used to temporarily pause the behavior
+// of a NoGCVerifier object. If we are not in debug mode or if the
+// NoGCVerifier object has a _verifygc value of false, then there
+// is nothing to do.
+
+class PauseNoGCVerifier: public StackObj {
+ private:
+  NoGCVerifier * _ngcv;
+
+ public:
+#ifdef ASSERT
+  PauseNoGCVerifier(NoGCVerifier * ngcv);
+  ~PauseNoGCVerifier();
+#else
+  PauseNoGCVerifier(NoGCVerifier * ngcv) {}
+  ~PauseNoGCVerifier() {}
+#endif
+};
+
+
+// A NoSafepointVerifier object will raise an assertion failure if
+// the current thread passes a possible safepoint while this object is
+// instantiated. A possible safepoint is any of: an oop allocation, blocking
+// on a Mutex or JavaLock, or executing a VM operation.
+//
+// If StrictSafepointChecks is turned off, it degrades into a NoGCVerifier
+//
+class NoSafepointVerifier : public NoGCVerifier {
+ friend class PauseNoSafepointVerifier;
+
+ private:
+  bool _activated;
+  Thread *_thread;
+ public:
+#ifdef ASSERT
+  NoSafepointVerifier(bool activated = true, bool verifygc = true) :
+    NoGCVerifier(verifygc),
+    _activated(activated) {
+    _thread = Thread::current();
+    if (_activated) {
+      _thread->_allow_allocation_count++;
+      _thread->_allow_safepoint_count++;
+    }
+  }
+
+  ~NoSafepointVerifier() {
+    if (_activated) {
+      _thread->_allow_allocation_count--;
+      _thread->_allow_safepoint_count--;
+    }
+  }
+#else
+  NoSafepointVerifier(bool activated = true, bool verifygc = true) : NoGCVerifier(verifygc) {}
+  ~NoSafepointVerifier() {}
+#endif
+};
+
+// A PauseNoSafepointVerifier is used to temporarily pause the
+// behavior of a NoSafepointVerifier object. If we are not in debug
+// mode then there is nothing to do. If the NoSafepointVerifier
+// object has an _activated value of false, then there is nothing to
+// do for safepoint and allocation checking, but there may still be
+// something to do for the underlying NoGCVerifier object.
+
+class PauseNoSafepointVerifier : public PauseNoGCVerifier {
+ private:
+  NoSafepointVerifier * _nsv;
+
+ public:
+#ifdef ASSERT
+  PauseNoSafepointVerifier(NoSafepointVerifier * nsv)
+    : PauseNoGCVerifier(nsv) {
+
+    _nsv = nsv;
+    if (_nsv->_activated) {
+      _nsv->_thread->_allow_allocation_count--;
+      _nsv->_thread->_allow_safepoint_count--;
+    }
+  }
+
+  ~PauseNoSafepointVerifier() {
+    if (_nsv->_activated) {
+      _nsv->_thread->_allow_allocation_count++;
+      _nsv->_thread->_allow_safepoint_count++;
+    }
+  }
+#else
+  PauseNoSafepointVerifier(NoSafepointVerifier * nsv)
+    : PauseNoGCVerifier(nsv) {}
+  ~PauseNoSafepointVerifier() {}
+#endif
+};
+
+// A NoAllocVerifier object can be placed in methods where one assumes that
+// no allocation will occur. The destructor will verify this property
+// unless the constructor is called with argument false (not activated).
+//
+// The check will only be done in debug mode and if activated.
+// Note: this only makes sense at safepoints (otherwise, other threads may
+// allocate concurrently.)
+
+class NoAllocVerifier : public StackObj {
+ private:
+  bool  _activated;
+
+ public:
+#ifdef ASSERT
+  NoAllocVerifier(bool activated = true) {
+    _activated = activated;
+    if (_activated) Thread::current()->_allow_allocation_count++;
+  }
+
+  ~NoAllocVerifier() {
+    if (_activated) Thread::current()->_allow_allocation_count--;
+  }
+#else
+  NoAllocVerifier(bool activated = true) {}
+  ~NoAllocVerifier() {}
+#endif
+};
+
+#endif // SHARE_VM_RUNTIME_SAFEPOINTVERIFIERS_HPP
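
A short usage sketch for the verifiers declared above; the function and the work inside it are hypothetical:

  // Hypothetical example: guard a region that must not reach a safepoint.
  void update_raw_oops() {
    NoSafepointVerifier nsv;  // asserts (debug builds) if this scope could safepoint
    // ... work on raw oops that must not move ...
    {
      PauseNoSafepointVerifier pause(&nsv);  // temporarily allow safepoints
      // ... code that may block or allocate ...
    }
    // nsv is in effect again until the end of the enclosing scope
  }
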
--- a/src/hotspot/share/runtime/simpleThresholdPolicy.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/simpleThresholdPolicy.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -24,10 +24,10 @@
 
 #include "precompiled.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/simpleThresholdPolicy.hpp"
 #include "runtime/simpleThresholdPolicy.inline.hpp"
 #include "code/scopeDesc.hpp"
--- a/src/hotspot/share/runtime/stackValue.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/stackValue.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -24,7 +24,8 @@
 
 #include "precompiled.hpp"
 #include "code/debugInfo.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/stackValue.hpp"
@@ -103,7 +104,7 @@
         value.noop = *(narrowOop*) value_addr;
       }
       // Decode narrowoop and wrap a handle around the oop
-      Handle h(Thread::current(), oopDesc::decode_heap_oop(value.noop));
+      Handle h(Thread::current(), CompressedOops::decode(value.noop));
       return new StackValue(h);
     }
 #endif
--- a/src/hotspot/share/runtime/sweeper.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/sweeper.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -824,12 +824,13 @@
   }
 }
 
-void NMethodSweeper::print() {
+void NMethodSweeper::print(outputStream* out) {
   ttyLocker ttyl;
-  tty->print_cr("Code cache sweeper statistics:");
-  tty->print_cr("  Total sweep time:                %1.0lfms", (double)_total_time_sweeping.value()/1000000);
-  tty->print_cr("  Total number of full sweeps:     %ld", _total_nof_code_cache_sweeps);
-  tty->print_cr("  Total number of flushed methods: %ld(%ld C2 methods)", _total_nof_methods_reclaimed,
+  out = (out == NULL) ? tty : out;
+  out->print_cr("Code cache sweeper statistics:");
+  out->print_cr("  Total sweep time:                %1.0lf ms", (double)_total_time_sweeping.value()/1000000);
+  out->print_cr("  Total number of full sweeps:     %ld", _total_nof_code_cache_sweeps);
+  out->print_cr("  Total number of flushed methods: %ld (thereof %ld C2 methods)", _total_nof_methods_reclaimed,
                                                     _total_nof_c2_methods_reclaimed);
-  tty->print_cr("  Total size of flushed methods:   " SIZE_FORMAT "kB", _total_flushed_size/K);
+  out->print_cr("  Total size of flushed methods:   " SIZE_FORMAT " kB", _total_flushed_size/K);
 }
--- a/src/hotspot/share/runtime/sweeper.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/sweeper.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,8 @@
   static void report_state_change(nmethod* nm);
   static void possibly_enable_sweeper();
   static void possibly_flush(nmethod* nm);
-  static void print();   // Printing/debugging
+  static void print(outputStream* out);   // Printing/debugging
+  static void print() { print(tty); }
 };
 
 #endif // SHARE_VM_RUNTIME_SWEEPER_HPP
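
The no-argument print() is kept as a thin wrapper, so existing callers keep writing to tty while the CodeHeap analytics path in java.cpp above can redirect. Illustrative call sites:

  NMethodSweeper::print();      // legacy behavior: statistics to tty
  NMethodSweeper::print(out);   // e.g. the outputStream of a diagnostic command
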
--- a/src/hotspot/share/runtime/synchronizer.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/synchronizer.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -39,6 +39,8 @@
 #include "runtime/objectMonitor.hpp"
 #include "runtime/objectMonitor.inline.hpp"
 #include "runtime/osThread.hpp"
+#include "runtime/safepointVerifiers.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.inline.hpp"
@@ -171,7 +173,7 @@
 
   if (mark->has_monitor()) {
     ObjectMonitor * const mon = mark->monitor();
-    assert(mon->object() == obj, "invariant");
+    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
     if (mon->owner() != self) return false;  // slow-path for IMS exception
 
     if (mon->first_waiter() != NULL) {
@@ -215,7 +217,7 @@
 
   if (mark->has_monitor()) {
     ObjectMonitor * const m = mark->monitor();
-    assert(m->object() == obj, "invariant");
+    assert(oopDesc::equals((oop) m->object(), obj), "invariant");
     Thread * const owner = (Thread *) m->_owner;
 
     // Lock contention and Transactional Lock Elision (TLE) diagnostics
@@ -1402,7 +1404,7 @@
     if (mark->has_monitor()) {
       ObjectMonitor * inf = mark->monitor();
       assert(inf->header()->is_neutral(), "invariant");
-      assert(inf->object() == object, "invariant");
+      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
       return inf;
     }
--- a/src/hotspot/share/runtime/thread.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/thread.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -48,6 +48,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
@@ -113,7 +114,7 @@
 #include "utilities/vmError.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/parallel/pcTasks.hpp"
 #endif // INCLUDE_ALL_GCS
 #if INCLUDE_JVMCI
@@ -2391,11 +2392,13 @@
 }
 
 #ifdef ASSERT
-// verify the JavaThread has not yet been published in the Threads::list, and
-// hence doesn't need protection from concurrent access at this stage
+// Verify the JavaThread has not yet been published in the Threads::list, and
+// hence doesn't need protection from concurrent access at this stage.
 void JavaThread::verify_not_published() {
-  ThreadsListHandle tlh;
-  assert(!tlh.includes(this), "JavaThread shouldn't have been published yet!");
+  // Cannot create a ThreadsListHandle here and check !tlh.includes(this)
+  // since an unpublished JavaThread doesn't participate in the
+  // Thread-SMR protocol for keeping a ThreadsList alive.
+  assert(!on_thread_list(), "JavaThread shouldn't have been published yet!");
 }
 #endif
 
@@ -3219,7 +3222,7 @@
 class PrintAndVerifyOopClosure: public OopClosure {
  protected:
   template <class T> inline void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     if (obj == NULL) return;
     tty->print(INTPTR_FORMAT ": ", p2i(p));
     if (oopDesc::is_oop_or_null(obj)) {
@@ -3658,6 +3661,13 @@
   // Timing (must come after argument parsing)
   TraceTime timer("Create VM", TRACETIME_LOG(Info, startuptime));
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  // Initialize assert poison page mechanism.
+  if (ShowRegistersOnAssert) {
+    initialize_assert_poison();
+  }
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
   // Initialize the os module after parsing the args
   jint os_init_2_result = os::init_2();
   if (os_init_2_result != JNI_OK) return os_init_2_result;
@@ -3834,7 +3844,28 @@
 
   // initialize compiler(s)
 #if defined(COMPILER1) || COMPILER2_OR_JVMCI
-  CompileBroker::compilation_init(CHECK_JNI_ERR);
+#if INCLUDE_JVMCI
+  bool force_JVMCI_initialization = false;
+  if (EnableJVMCI) {
+    // Initialize JVMCI eagerly when it is explicitly requested.
+    // Or when JVMCIPrintProperties is enabled.
+    // The JVMCI Java initialization code will read this flag and
+    // do the printing if it's set.
+    force_JVMCI_initialization = EagerJVMCI || JVMCIPrintProperties;
+
+    if (!force_JVMCI_initialization) {
+      // 8145270: Force initialization of JVMCI runtime otherwise requests for blocking
+      // compilations via JVMCI will not actually block until JVMCI is initialized.
+      force_JVMCI_initialization = UseJVMCICompiler && (!UseInterpreter || !BackgroundCompilation);
+    }
+  }
+#endif
+  CompileBroker::compilation_init_phase1(CHECK_JNI_ERR);
+  // Postpone completion of compiler initialization to after JVMCI
+  // is initialized to avoid timeouts of blocking compilations.
+  if (JVMCI_ONLY(!force_JVMCI_initialization) NOT_JVMCI(true)) {
+    CompileBroker::compilation_init_phase2();
+  }
 #endif
 
   // Pre-initialize some JSR292 core classes to avoid deadlock during class loading.
@@ -3861,22 +3892,9 @@
   SystemDictionary::compute_java_loaders(CHECK_JNI_ERR);
 
 #if INCLUDE_JVMCI
-  if (EnableJVMCI) {
-    // Initialize JVMCI eagerly when it is explicitly requested.
-    // Or when JVMCIPrintProperties is enabled.
-    // The JVMCI Java initialization code will read this flag and
-    // do the printing if it's set.
-    bool init = EagerJVMCI || JVMCIPrintProperties;
-
-    if (!init) {
-      // 8145270: Force initialization of JVMCI runtime otherwise requests for blocking
-      // compilations via JVMCI will not actually block until JVMCI is initialized.
-      init = UseJVMCICompiler && (!UseInterpreter || !BackgroundCompilation);
-    }
-
-    if (init) {
-      JVMCIRuntime::force_initialization(CHECK_JNI_ERR);
-    }
+  if (force_JVMCI_initialization) {
+    JVMCIRuntime::force_initialization(CHECK_JNI_ERR);
+    CompileBroker::compilation_init_phase2();
   }
 #endif
 
@@ -4253,11 +4271,6 @@
     VMThread::destroy();
   }
 
-  // clean up ideal graph printers
-#if defined(COMPILER2) && !defined(PRODUCT)
-  IdealGraphPrinter::clean_up();
-#endif
-
   // Now, all Java threads are gone except daemon threads. Daemon threads
   // running Java code or in VM are stopped by the Safepoint. However,
   // daemon threads executing native code are still running.  But they
@@ -4266,6 +4279,16 @@
 
   VM_Exit::set_vm_exited();
 
+  // Clean up ideal graph printers after the VMThread has started
+  // the final safepoint which will block all the Compiler threads.
+  // Note that this Thread has already logically exited so the
+  // clean_up() function's use of a JavaThreadIteratorWithHandle
+  // would be a problem except set_vm_exited() has remembered the
+  // shutdown thread which is granted a policy exception.
+#if defined(COMPILER2) && !defined(PRODUCT)
+  IdealGraphPrinter::clean_up();
+#endif
+
   notify_vm_shutdown();
 
   // We are after VM_Exit::set_vm_exited() so we can't call
--- a/src/hotspot/share/runtime/threadSMR.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/threadSMR.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,6 +28,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.inline.hpp"
+#include "runtime/vm_operations.hpp"
 #include "services/threadService.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -469,6 +470,16 @@
 
 ThreadsListHandle::ThreadsListHandle(Thread *self) : _list(ThreadsSMRSupport::acquire_stable_list(self, /* is_ThreadsListSetter */ false)), _self(self) {
   assert(self == Thread::current(), "sanity check");
+  // Threads::threads_do() is used by the Thread-SMR protocol to visit all
+  // Threads in the system which ensures the safety of the ThreadsList
+  // managed by this ThreadsListHandle, but JavaThreads that are not on
+  // the Threads list cannot be included in that visit. The JavaThread that
+  // calls Threads::destroy_vm() is exempt from this check because it has
+  // to logically exit as part of the shutdown procedure. This is safe
+  // because VM_Exit::_shutdown_thread is not set until after the VMThread
+  // has started the final safepoint which holds the Threads_lock for the
+  // remainder of the VM's life.
+  assert(!self->is_Java_thread() || self == VM_Exit::shutdown_thread() || (((JavaThread*)self)->on_thread_list() && !((JavaThread*)self)->is_terminated()), "JavaThread must be on the Threads list to use a ThreadsListHandle");
   if (EnableThreadSMRStatistics) {
     _timer.start();
   }
--- a/src/hotspot/share/runtime/unhandledOops.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/unhandledOops.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -24,7 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/thread.hpp"
--- a/src/hotspot/share/runtime/vframeArray.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/vframeArray.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -40,6 +40,7 @@
 #include "runtime/vframe.hpp"
 #include "runtime/vframeArray.hpp"
 #include "runtime/vframe_hp.hpp"
+#include "utilities/copy.hpp"
 #include "utilities/events.hpp"
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
--- a/src/hotspot/share/runtime/vmStructs.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/vmStructs.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -2261,10 +2261,10 @@
                                                                           \
   declare_constant(G1CardTable::g1_young_gen)                             \
                                                                           \
-  declare_constant(CollectedHeap::SerialHeap)                             \
-  declare_constant(CollectedHeap::CMSHeap)                                \
-  declare_constant(CollectedHeap::ParallelScavengeHeap)                   \
-  declare_constant(CollectedHeap::G1CollectedHeap)                        \
+  declare_constant(CollectedHeap::Serial)                                 \
+  declare_constant(CollectedHeap::Parallel)                               \
+  declare_constant(CollectedHeap::CMS)                                    \
+  declare_constant(CollectedHeap::G1)                                     \
                                                                           \
   /* constants from Generation::Name enum */                              \
                                                                           \
--- a/src/hotspot/share/runtime/vmThread.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/vmThread.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -576,6 +576,31 @@
   }
 }
 
+// A SkipGCALot object is used to elide the usual effect of gc-a-lot
+// over a section of execution by a thread. Currently, it's used only to
+// prevent re-entrant calls to GC.
+class SkipGCALot : public StackObj {
+  private:
+   bool _saved;
+   Thread* _t;
+
+  public:
+#ifdef ASSERT
+    SkipGCALot(Thread* t) : _t(t) {
+      _saved = _t->skip_gcalot();
+      _t->set_skip_gcalot(true);
+    }
+
+    ~SkipGCALot() {
+      assert(_t->skip_gcalot(), "Save-restore protocol invariant");
+      _t->set_skip_gcalot(_saved);
+    }
+#else
+    SkipGCALot(Thread* t) { }
+    ~SkipGCALot() { }
+#endif
+};
+
 void VMThread::execute(VM_Operation* op) {
   Thread* t = Thread::current();
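
The hunk context shows the class being defined just ahead of VMThread::execute, which presumably instantiates it to guard the request path; a hedged sketch of such a use (the placement inside execute() is illustrative):

  // Illustrative placement: elide gc-a-lot while queueing a VM operation
  // so stress-GC points in the bookkeeping cannot recurse into another GC.
  SkipGCALot sgcalot(t);  // restores the previous skip_gcalot() state on scope exit
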
 
--- a/src/hotspot/share/runtime/vm_operations.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/vm_operations.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -417,7 +417,7 @@
 }
 
 volatile bool VM_Exit::_vm_exited = false;
-Thread * VM_Exit::_shutdown_thread = NULL;
+Thread * volatile VM_Exit::_shutdown_thread = NULL;
 
 int VM_Exit::set_vm_exited() {
 
--- a/src/hotspot/share/runtime/vm_operations.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/runtime/vm_operations.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -459,7 +459,7 @@
  private:
   int  _exit_code;
   static volatile bool _vm_exited;
-  static Thread * _shutdown_thread;
+  static Thread * volatile _shutdown_thread;
   static void wait_if_vm_exited();
  public:
   VM_Exit(int exit_code) {
@@ -468,6 +468,7 @@
   static int wait_for_threads_in_native_to_block();
   static int set_vm_exited();
   static bool vm_exited()                      { return _vm_exited; }
+  static Thread * shutdown_thread()            { return _shutdown_thread; }
   static void block_if_vm_exited() {
     if (_vm_exited) {
       wait_if_vm_exited();
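
The "Thread * volatile" change above hinges on where volatile binds in a declarator; a two-line sketch (with int standing in for Thread):

```cpp
int* volatile p1;   // volatile POINTER to int: the pointer itself may be
                    // written by one thread and observed by another --
                    // the situation for VM_Exit::_shutdown_thread
volatile int* p2;   // pointer to volatile int: qualifies the pointee,
                    // not the pointer, which is not what is needed here
```
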
--- a/src/hotspot/share/services/allocationSite.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/services/allocationSite.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
 #define SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/nativeCallStack.hpp"
 
 // Allocation site represents a code path that makes a memory
--- a/src/hotspot/share/services/diagnosticCommand.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/services/diagnosticCommand.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -104,6 +104,7 @@
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CodeListDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CodeCacheDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<TouchedMethodsDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CodeHeapAnalyticsDCmd>(full_export, true, false));
 
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CompilerDirectivesPrintDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CompilerDirectivesAddDCmd>(full_export, true, false));
@@ -920,6 +921,31 @@
   CodeCache::print_layout(output());
 }
 
+//---<  BEGIN  >--- CodeHeap State Analytics.
+CodeHeapAnalyticsDCmd::CodeHeapAnalyticsDCmd(outputStream* output, bool heap) :
+                                             DCmdWithParser(output, heap),
+  _function("function", "Function to be performed (aggregate, UsedSpace, FreeSpace, MethodCount, MethodSpace, MethodAge, discard", "STRING", false, "all"),
+  _granularity("granularity", "Detail level - smaller value -> more detail", "STRING", false, "4096") {
+  _dcmdparser.add_dcmd_argument(&_function);
+  _dcmdparser.add_dcmd_argument(&_granularity);
+}
+
+void CodeHeapAnalyticsDCmd::execute(DCmdSource source, TRAPS) {
+  CompileBroker::print_heapinfo(output(), _function.value(), _granularity.value());
+}
+
+int CodeHeapAnalyticsDCmd::num_arguments() {
+  ResourceMark rm;
+  CodeHeapAnalyticsDCmd* dcmd = new CodeHeapAnalyticsDCmd(NULL, false);
+  if (dcmd != NULL) {
+    DCmdMark mark(dcmd);
+    return dcmd->_dcmdparser.num_arguments();
+  } else {
+    return 0;
+  }
+}
+//---<  END  >--- CodeHeap State Analytics.
+
 void CompilerDirectivesPrintDCmd::execute(DCmdSource source, TRAPS) {
   DirectivesStack::print(output());
 }
--- a/src/hotspot/share/services/diagnosticCommand.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/services/diagnosticCommand.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -641,6 +641,33 @@
   virtual void execute(DCmdSource source, TRAPS);
 };
 
+//---<  BEGIN  >--- CodeHeap State Analytics.
+class CodeHeapAnalyticsDCmd : public DCmdWithParser {
+protected:
+  DCmdArgument<char*> _function;
+  DCmdArgument<char*> _granularity;
+public:
+  CodeHeapAnalyticsDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "Compiler.CodeHeap_Analytics";
+  }
+  static const char* description() {
+    return "Print CodeHeap analytics";
+  }
+  static const char* impact() {
+    return "Low: Depends on code heap size and content. "
+           "Holds CodeCache_lock during analysis step, usually sub-second duration.";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
+  static int num_arguments();
+  virtual void execute(DCmdSource source, TRAPS);
+};
+//---<  END  >--- CodeHeap State Analytics.
+
 class CompilerDirectivesPrintDCmd : public DCmd {
 public:
   CompilerDirectivesPrintDCmd(outputStream* output, bool heap) : DCmd(output, heap) {}
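
Usage note for the command declared above (assuming the standard jcmd dispatch for diagnostic commands, which this changeset does not show): once registered it should be invocable as, for example, "jcmd <pid> Compiler.CodeHeap_Analytics aggregate 4096", with both arguments optional and defaulting to "all" and "4096" as declared in the parser setup.
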
--- a/src/hotspot/share/services/heapDumper.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/services/heapDumper.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,7 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 #include "memory/allocation.inline.hpp"
@@ -41,7 +41,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/reflectionUtils.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
--- a/src/hotspot/share/services/memBaseline.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/services/memBaseline.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,7 +27,6 @@
 
 #if INCLUDE_NMT
 
-#include "memory/allocation.hpp"
 #include "runtime/mutex.hpp"
 #include "services/mallocSiteTable.hpp"
 #include "services/mallocTracker.hpp"
--- a/src/hotspot/share/services/memoryManager.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/services/memoryManager.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -27,6 +27,7 @@
 
 #include "gc/shared/gcCause.hpp"
 #include "memory/allocation.hpp"
+#include "oops/oop.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/timer.hpp"
@@ -68,7 +69,7 @@
 
   void add_pool(MemoryPool* pool);
 
-  bool is_manager(instanceHandle mh)     { return mh() == _memory_mgr_obj; }
+  bool is_manager(instanceHandle mh)     { return oopDesc::equals(mh(), _memory_mgr_obj); }
 
   virtual instanceOop get_memory_manager_instance(TRAPS);
   virtual bool is_gc_memory_manager()    { return false; }
--- a/src/hotspot/share/services/memoryPool.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/services/memoryPool.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_SERVICES_MEMORYPOOL_HPP
 
 #include "memory/heap.hpp"
+#include "oops/oop.hpp"
 #include "services/memoryUsage.hpp"
 #include "utilities/macros.hpp"
 
@@ -92,7 +93,7 @@
   // max size could be changed
   virtual size_t max_size()    const       { return _max_size; }
 
-  bool is_pool(instanceHandle pool) { return (pool() == _memory_pool_obj); }
+  bool is_pool(instanceHandle pool) { return oopDesc::equals(pool(), _memory_pool_obj); }
 
   bool available_for_allocation()   { return _available_for_allocation; }
   bool set_available_for_allocation(bool value) {
--- a/src/hotspot/share/services/memoryService.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/services/memoryService.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -29,6 +29,7 @@
 #include "logging/logConfiguration.hpp"
 #include "memory/heap.hpp"
 #include "memory/memRegion.hpp"
+#include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/services/threadService.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/services/threadService.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -607,7 +607,7 @@
     for (int j = 0; j < len; j++) {
       oop monitor = locked_monitors->at(j);
       assert(monitor != NULL, "must be a Java object");
-      if (monitor == object) {
+      if (oopDesc::equals(monitor, object)) {
         found = true;
         break;
       }
--- a/src/hotspot/share/trace/traceEventClasses.xsl	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/trace/traceEventClasses.xsl	Fri Apr 13 03:05:19 2018 +0200
@@ -143,7 +143,8 @@
     }
   }
 
-  using TraceEvent::commit; // else commit() is hidden by overloaded versions in this class
+  using <xsl:value-of select="concat('TraceEvent&lt;Event', @id, '&gt;')"/>::commit; // else commit() is hidden by overloaded versions in this class
+
 <xsl:variable name="instant" select="@is_instant"/>
 <!-- non static method (only for non instant events)-->
 <xsl:if test="$instant='false'">
--- a/src/hotspot/share/utilities/accessFlags.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/utilities/accessFlags.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,11 +26,12 @@
 #define SHARE_VM_UTILITIES_ACCESSFLAGS_HPP
 
 #include "jvm.h"
-#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
 // AccessFlags is an abstraction over Java access flags.
 
+class outputStream;
 
 enum {
   // See jvm.h for shared JVM_ACC_XXX access flags
--- a/src/hotspot/share/utilities/constantTag.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/utilities/constantTag.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,7 +26,7 @@
 #define SHARE_VM_UTILITIES_CONSTANTTAG_HPP
 
 #include "jvm.h"
-#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 // constant tags in Java .class files
 
--- a/src/hotspot/share/utilities/debug.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/utilities/debug.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -54,11 +54,20 @@
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
 #include "utilities/formatBuffer.hpp"
+#include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
 #include <stdio.h>
 
+// Support for showing register content on asserts/guarantees.
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+static char g_dummy;
+char* g_assert_poison = &g_dummy;
+static intx g_asserting_thread = 0;
+static void* g_assertion_context = NULL;
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
 #ifndef ASSERT
 #  ifdef _DEBUG
    // NOTE: don't turn the lines below into a comment -- if you're getting
@@ -212,7 +221,13 @@
   if (Debugging || error_is_suppressed(file, line)) return;
   va_list detail_args;
   va_start(detail_args, detail_fmt);
-  VMError::report_and_die(Thread::current_or_null(), file, line, error_msg, detail_fmt, detail_args);
+  void* context = NULL;
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if (g_assertion_context != NULL && os::current_thread_id() == g_asserting_thread) {
+    context = g_assertion_context;
+  }
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+  VMError::report_and_die(Thread::current_or_null(), context, file, line, error_msg, detail_fmt, detail_args);
   va_end(detail_args);
 }
 
@@ -226,7 +241,13 @@
   if (Debugging || error_is_suppressed(file, line)) return;
   va_list detail_args;
   va_start(detail_args, detail_fmt);
-  VMError::report_and_die(Thread::current_or_null(), file, line, "fatal error", detail_fmt, detail_args);
+  void* context = NULL;
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if (g_assertion_context != NULL && os::current_thread_id() == g_asserting_thread) {
+    context = g_assertion_context;
+  }
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+  VMError::report_and_die(Thread::current_or_null(), context, file, line, "fatal error", detail_fmt, detail_args);
   va_end(detail_args);
 }
 
@@ -676,3 +697,50 @@
 };
 
 #endif // !PRODUCT
+
+// Support for showing register content on asserts/guarantees.
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+
+static ucontext_t g_stored_assertion_context;
+
+void initialize_assert_poison() {
+  char* page = os::reserve_memory(os::vm_page_size());
+  if (page) {
+    if (os::commit_memory(page, os::vm_page_size(), false) &&
+        os::protect_memory(page, os::vm_page_size(), os::MEM_PROT_NONE)) {
+      g_assert_poison = page;
+    }
+  }
+}
+
+static bool store_context(const void* context) {
+  // memcpy returns its destination pointer and cannot fail, so there is
+  // nothing to test here; copy the context wholesale.
+  memcpy(&g_stored_assertion_context, context, sizeof(ucontext_t));
+#if defined(__linux) && defined(PPC64)
+  // on Linux ppc64, ucontext_t contains pointers into itself which have to be patched up
+  //  after copying the context (see comment in sys/ucontext.h):
+  *((void**) &g_stored_assertion_context.uc_mcontext.regs) = &(g_stored_assertion_context.uc_mcontext.gp_regs);
+#endif
+  return true;
+}
+
+bool handle_assert_poison_fault(const void* ucVoid, const void* faulting_address) {
+  if (faulting_address == g_assert_poison) {
+    // Disarm poison page.
+    os::protect_memory((char*)g_assert_poison, os::vm_page_size(), os::MEM_PROT_RWX);
+    // Store Context away.
+    if (ucVoid) {
+      const intx my_tid = os::current_thread_id();
+      if (Atomic::cmpxchg(my_tid, &g_asserting_thread, (intx)0) == 0) {
+        if (store_context(ucVoid)) {
+          g_assertion_context = &g_stored_assertion_context;
+        }
+      }
+    }
+    return true;
+  }
+  return false;
+}
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
--- a/src/hotspot/share/utilities/debug.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/utilities/debug.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,17 @@
 
 #include <stddef.h>
 
+// ShowRegistersOnAssert support (for now Linux only)
+#if defined(LINUX) && !defined(ZERO)
+#define CAN_SHOW_REGISTERS_ON_ASSERT
+extern char* g_assert_poison;
+#define TOUCH_ASSERT_POISON (*g_assert_poison) = 'X';
+void initialize_assert_poison();
+bool handle_assert_poison_fault(const void* ucVoid, const void* faulting_address);
+#else
+#define TOUCH_ASSERT_POISON
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
 // assertions
 #ifndef ASSERT
 #define vmassert(p, ...)
@@ -42,6 +53,7 @@
 #define vmassert(p, ...)                                                       \
 do {                                                                           \
   if (!(p)) {                                                                  \
+    TOUCH_ASSERT_POISON;                                                       \
     if (is_executing_unit_tests()) {                                           \
       report_assert_msg(__VA_ARGS__);                                          \
     }                                                                          \
@@ -67,6 +79,7 @@
 #define vmassert_status(p, status, msg) \
 do {                                                                           \
   if (!(p)) {                                                                  \
+    TOUCH_ASSERT_POISON;                                                       \
     report_vm_status_error(__FILE__, __LINE__, "assert(" #p ") failed",        \
                            status, msg);                                       \
     BREAKPOINT;                                                                \
@@ -83,6 +96,7 @@
 #define guarantee(p, ...)                                                         \
 do {                                                                              \
   if (!(p)) {                                                                     \
+    TOUCH_ASSERT_POISON;                                                          \
     report_vm_error(__FILE__, __LINE__, "guarantee(" #p ") failed", __VA_ARGS__); \
     BREAKPOINT;                                                                   \
   }                                                                               \
@@ -90,6 +104,7 @@
 
 #define fatal(...)                                                                \
 do {                                                                              \
+  TOUCH_ASSERT_POISON;                                                            \
   report_fatal(__FILE__, __LINE__, __VA_ARGS__);                                  \
   BREAKPOINT;                                                                     \
 } while (0)
@@ -103,18 +118,21 @@
 
 #define ShouldNotCallThis()                                                       \
 do {                                                                              \
+  TOUCH_ASSERT_POISON;                                                            \
   report_should_not_call(__FILE__, __LINE__);                                     \
   BREAKPOINT;                                                                     \
 } while (0)
 
 #define ShouldNotReachHere()                                                      \
 do {                                                                              \
+  TOUCH_ASSERT_POISON;                                                            \
   report_should_not_reach_here(__FILE__, __LINE__);                               \
   BREAKPOINT;                                                                     \
 } while (0)
 
 #define Unimplemented()                                                           \
 do {                                                                              \
+  TOUCH_ASSERT_POISON;                                                            \
   report_unimplemented(__FILE__, __LINE__);                                       \
   BREAKPOINT;                                                                     \
 } while (0)
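
To make the poison-page protocol concrete, here is a minimal standalone Linux sketch -- deliberately simplified, with none of HotSpot's per-thread bookkeeping or Atomic::cmpxchg arbitration -- of the same three steps: arm a PROT_NONE page, have the SIGSEGV handler recognize the faulting address, disarm the page and capture the ucontext, then let the faulting store retry:

```cpp
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <ucontext.h>
#include <unistd.h>

static char* g_poison = nullptr;
static ucontext_t g_saved_ctx;      // register state at the assert site

static void segv_handler(int, siginfo_t* info, void* uc) {
  long pg = sysconf(_SC_PAGESIZE);
  if (info->si_addr == g_poison) {
    mprotect(g_poison, pg, PROT_READ | PROT_WRITE);   // disarm the page
    memcpy(&g_saved_ctx, uc, sizeof(ucontext_t));     // keep the context
    return;  // the faulting store is retried and now succeeds
  }
  _exit(1);  // a genuine crash, not the poison page
}

int main() {
  long pg = sysconf(_SC_PAGESIZE);
  g_poison = (char*)mmap(nullptr, pg, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = segv_handler;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, nullptr);

  *g_poison = 'X';  // what TOUCH_ASSERT_POISON does before reporting
  printf("context captured; a reporter could now print registers\n");
  return 0;
}
```

In the real code, the cmpxchg on g_asserting_thread ensures only the first asserting thread stores its context, and report_and_die then threads that context through to the register printer.
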
--- a/src/hotspot/share/utilities/exceptions.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/utilities/exceptions.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -443,9 +443,9 @@
 volatile int Exceptions::_out_of_memory_error_class_metaspace_errors = 0;
 
 void Exceptions::count_out_of_memory_exceptions(Handle exception) {
-  if (exception() == Universe::out_of_memory_error_metaspace()) {
+  if (oopDesc::equals(exception(), Universe::out_of_memory_error_metaspace())) {
      Atomic::inc(&_out_of_memory_error_metaspace_errors);
-  } else if (exception() == Universe::out_of_memory_error_class_metaspace()) {
+  } else if (oopDesc::equals(exception(), Universe::out_of_memory_error_class_metaspace())) {
      Atomic::inc(&_out_of_memory_error_class_metaspace_errors);
   } else {
      // everything else reported as java heap OOM
--- a/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -139,6 +139,7 @@
 #pragma warning( disable : 4201 ) // nonstandard extension used : nameless struct/union (needed in windows.h)
 #pragma warning( disable : 4511 ) // copy constructor could not be generated
 #pragma warning( disable : 4291 ) // no matching operator delete found; memory will not be freed if initialization throws an exception
+#pragma warning( disable : 4351 ) // new behavior: elements of array ... will be default initialized
 #ifdef CHECK_UNHANDLED_OOPS
 #pragma warning( disable : 4521 ) // class has multiple copy ctors of a single type
 #pragma warning( disable : 4522 ) // class has multiple assignment operators of a single type
--- a/src/hotspot/share/utilities/growableArray.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/utilities/growableArray.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_UTILITIES_GROWABLEARRAY_HPP
 
 #include "memory/allocation.hpp"
+#include "oops/oop.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/ostream.hpp"
@@ -211,6 +212,15 @@
 
   void print();
 
+  inline static bool safe_equals(oop obj1, oop obj2) {
+    return oopDesc::equals(obj1, obj2);
+  }
+
+  template <class X>
+  inline static bool safe_equals(X i1, X i2) {
+    return i1 == i2;
+  }
+
   int append(const E& elem) {
     check_nesting();
     if (_len == _max) grow(_len);
@@ -295,7 +305,7 @@
 
   bool contains(const E& elem) const {
     for (int i = 0; i < _len; i++) {
-      if (_data[i] == elem) return true;
+      if (safe_equals(_data[i], elem)) return true;
     }
     return false;
   }
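
The two safe_equals overloads above rely on ordinary C++ overload resolution: the non-template oop overload is preferred for oop elements, so those comparisons route through oopDesc::equals() (which a barrier-aware collector can intercept), while every other element type keeps plain operator==. A standalone sketch with a stand-in Oop type (not HotSpot code):

```cpp
#include <iostream>

struct Oop { void* p; };   // stand-in for HotSpot's oop

// Stand-in for oopDesc::equals(): a real GC barrier set could resolve
// forwarded objects here before comparing.
static bool barrier_aware_equals(Oop a, Oop b) { return a.p == b.p; }

static bool safe_equals(Oop a, Oop b) { return barrier_aware_equals(a, b); }

template <class X>
static bool safe_equals(X a, X b)     { return a == b; }

int main() {
  Oop o1{nullptr}, o2{nullptr};
  std::cout << safe_equals(o1, o2)    // non-template overload wins for Oop
            << safe_equals(3, 4)      // template handles everything else
            << '\n';                  // prints "10"
  return 0;
}
```

The same oopDesc::equals() routing explains the analogous changes to memoryManager.hpp, memoryPool.hpp, threadService.cpp, and exceptions.cpp elsewhere in this changeset.
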
--- a/src/hotspot/share/utilities/ostream.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/utilities/ostream.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -28,7 +28,7 @@
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/vm_version.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
--- a/src/hotspot/share/utilities/sizes.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/utilities/sizes.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_UTILITIES_SIZES_HPP
 #define SHARE_VM_UTILITIES_SIZES_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 // The following two classes are used to represent 'sizes' and 'offsets' in the VM;
--- a/src/hotspot/share/utilities/vmError.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/utilities/vmError.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1238,10 +1238,10 @@
   report_and_die(message, "%s", "");
 }
 
-void VMError::report_and_die(Thread* thread, const char* filename, int lineno, const char* message,
+void VMError::report_and_die(Thread* thread, void* context, const char* filename, int lineno, const char* message,
                              const char* detail_fmt, va_list detail_args)
 {
-  report_and_die(INTERNAL_ERROR, message, detail_fmt, detail_args, thread, NULL, NULL, NULL, filename, lineno, 0);
+  report_and_die(INTERNAL_ERROR, message, detail_fmt, detail_args, thread, NULL, NULL, context, filename, lineno, 0);
 }
 
 void VMError::report_and_die(Thread* thread, const char* filename, int lineno, size_t size,
@@ -1674,24 +1674,24 @@
   // Case 16 is tested by test/hotspot/jtreg/runtime/ErrorHandling/ThreadsListHandleInErrorHandlingTest.java.
   // Case 17 is tested by test/hotspot/jtreg/runtime/ErrorHandling/NestedThreadsListHandleInErrorHandlingTest.java.
   switch (how) {
-    case  1: vmassert(str == NULL, "expected null");
+    case  1: vmassert(str == NULL, "expected null"); break;
     case  2: vmassert(num == 1023 && *str == 'X',
-                      "num=" SIZE_FORMAT " str=\"%s\"", num, str);
-    case  3: guarantee(str == NULL, "expected null");
+                      "num=" SIZE_FORMAT " str=\"%s\"", num, str); break;
+    case  3: guarantee(str == NULL, "expected null"); break;
     case  4: guarantee(num == 1023 && *str == 'X',
-                       "num=" SIZE_FORMAT " str=\"%s\"", num, str);
-    case  5: fatal("expected null");
-    case  6: fatal("num=" SIZE_FORMAT " str=\"%s\"", num, str);
+                       "num=" SIZE_FORMAT " str=\"%s\"", num, str); break;
+    case  5: fatal("expected null"); break;
+    case  6: fatal("num=" SIZE_FORMAT " str=\"%s\"", num, str); break;
     case  7: fatal("%s%s#    %s%s#    %s%s#    %s%s#    %s%s#    "
                    "%s%s#    %s%s#    %s%s#    %s%s#    %s%s#    "
                    "%s%s#    %s%s#    %s%s#    %s%s#    %s",
                    msg, eol, msg, eol, msg, eol, msg, eol, msg, eol,
                    msg, eol, msg, eol, msg, eol, msg, eol, msg, eol,
-                   msg, eol, msg, eol, msg, eol, msg, eol, msg);
-    case  8: vm_exit_out_of_memory(num, OOM_MALLOC_ERROR, "ChunkPool::allocate");
-    case  9: ShouldNotCallThis();
-    case 10: ShouldNotReachHere();
-    case 11: Unimplemented();
+                   msg, eol, msg, eol, msg, eol, msg, eol, msg); break;
+    case  8: vm_exit_out_of_memory(num, OOM_MALLOC_ERROR, "ChunkPool::allocate"); break;
+    case  9: ShouldNotCallThis(); break;
+    case 10: ShouldNotReachHere(); break;
+    case 11: Unimplemented(); break;
     // There's no guarantee the bad data pointer will crash us
     // so "break" out to the ShouldNotReachHere().
     case 12: *dataPtr = '\0'; break;
@@ -1714,6 +1714,7 @@
 
     default: tty->print_cr("ERROR: %d: unexpected test_num value.", how);
   }
+  tty->print_cr("VMError::controlled_crash: survived intentional crash. Did you suppress the assert?");
   ShouldNotReachHere();
 }
 #endif // !PRODUCT
--- a/src/hotspot/share/utilities/vmError.hpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/hotspot/share/utilities/vmError.hpp	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -158,8 +158,8 @@
   static void report_and_die(Thread* thread, unsigned int sig, address pc,
                              void* siginfo, void* context);
 
-  static void report_and_die(Thread* thread,const char* filename, int lineno, const char* message,
-                             const char* detail_fmt, va_list detail_args) ATTRIBUTE_PRINTF(5, 0);
+  static void report_and_die(Thread* thread, void* context, const char* filename, int lineno, const char* message,
+                             const char* detail_fmt, va_list detail_args) ATTRIBUTE_PRINTF(6, 0);
 
   static void report_and_die(Thread* thread, const char* filename, int lineno, size_t size,
                              VMErrorType vm_err_type, const char* detail_fmt,
--- a/src/java.base/share/classes/java/lang/invoke/MemberName.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/java.base/share/classes/java/lang/invoke/MemberName.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1047,7 +1047,8 @@
          *  If lookup fails or access is not permitted, null is returned.
          *  Otherwise a fresh copy of the given member is returned, with modifier bits filled in.
          */
-        private MemberName resolve(byte refKind, MemberName ref, Class<?> lookupClass) {
+        private MemberName resolve(byte refKind, MemberName ref, Class<?> lookupClass,
+                                   boolean speculativeResolve) {
             MemberName m = ref.clone();  // JVM will side-effect the ref
             assert(refKind == m.getReferenceKind());
             try {
@@ -1066,7 +1067,10 @@
                 //
                 // REFC view on PTYPES doesn't matter, since it is used only as a starting point for resolution and doesn't
                 // participate in method selection.
-                m = MethodHandleNatives.resolve(m, lookupClass);
+                m = MethodHandleNatives.resolve(m, lookupClass, speculativeResolve);
+                if (m == null && speculativeResolve) {
+                    return null;
+                }
                 m.checkForTypeAlias(m.getDeclaringClass());
                 m.resolution = null;
             } catch (ClassNotFoundException | LinkageError ex) {
@@ -1091,7 +1095,7 @@
         MemberName resolveOrFail(byte refKind, MemberName m, Class<?> lookupClass,
                                  Class<NoSuchMemberException> nsmClass)
                 throws IllegalAccessException, NoSuchMemberException {
-            MemberName result = resolve(refKind, m, lookupClass);
+            MemberName result = resolve(refKind, m, lookupClass, false);
             if (result.isResolved())
                 return result;
             ReflectiveOperationException ex = result.makeAccessException();
@@ -1106,8 +1110,8 @@
          */
         public
         MemberName resolveOrNull(byte refKind, MemberName m, Class<?> lookupClass) {
-            MemberName result = resolve(refKind, m, lookupClass);
-            if (result.isResolved())
+            MemberName result = resolve(refKind, m, lookupClass, true);
+            if (result != null && result.isResolved())
                 return result;
             return null;
         }
--- a/src/java.base/share/classes/java/lang/invoke/MethodHandleNatives.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/java.base/share/classes/java/lang/invoke/MethodHandleNatives.java	Fri Apr 13 03:05:19 2018 +0200
@@ -49,7 +49,8 @@
 
     static native void init(MemberName self, Object ref);
     static native void expand(MemberName self);
-    static native MemberName resolve(MemberName self, Class<?> caller) throws LinkageError, ClassNotFoundException;
+    static native MemberName resolve(MemberName self, Class<?> caller,
+            boolean speculativeResolve) throws LinkageError, ClassNotFoundException;
     static native int getMembers(Class<?> defc, String matchName, String matchSig,
             int matchFlags, Class<?> caller, int skip, MemberName[] results);
 
--- a/src/java.base/solaris/native/libjvm_db/libjvm_db.c	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/java.base/solaris/native/libjvm_db/libjvm_db.c	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1393,6 +1393,7 @@
     bcp          = (uintptr_t) regs[R_L1];
     methodPtr = (uintptr_t) regs[R_L2];
     sender_sp = regs[R_I5];
+    fp = (uintptr_t) regs[R_FP];
     if (debug > 2) {
         fprintf(stderr, "\nregs[R_I1]=%lx, regs[R_I2]=%lx, regs[R_I5]=%lx, regs[R_L1]=%lx, regs[R_L2]=%lx\n",
                          regs[R_I1], regs[R_I2], regs[R_I5], regs[R_L1], regs[R_L2]);
--- a/src/java.instrument/share/classes/sun/instrument/InstrumentationImpl.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/java.instrument/share/classes/sun/instrument/InstrumentationImpl.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,6 +161,9 @@
             throw new UnsupportedOperationException(
               "retransformClasses is not supported in this environment");
         }
+        if (classes.length == 0) {
+            return; // no-op
+        }
         retransformClasses0(mNativeAgent, classes);
     }
 
--- a/src/jdk.attach/windows/classes/sun/tools/attach/VirtualMachineImpl.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/jdk.attach/windows/classes/sun/tools/attach/VirtualMachineImpl.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,9 +80,19 @@
         assert args.length <= 3;        // includes null
 
         // create a pipe using a random name
-        int r = (new Random()).nextInt();
-        String pipename = "\\\\.\\pipe\\javatool" + r;
-        long hPipe = createPipe(pipename);
+        Random rnd = new Random();
+        int r = rnd.nextInt();
+        String pipeprefix = "\\\\.\\pipe\\javatool";
+        String pipename = pipeprefix + r;
+        long hPipe;
+        try {
+            hPipe = createPipe(pipename);
+        } catch (IOException ce) {
+            // Retry with another random pipe name.
+            r = rnd.nextInt();
+            pipename = pipeprefix + r;
+            hPipe = createPipe(pipename);
+        }
 
         // check if we are detached - in theory it's possible that detach is invoked
         // after this check but before we enqueue the command.
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSHeap.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSHeap.java	Fri Apr 13 03:05:19 2018 +0200
@@ -35,6 +35,6 @@
   }
 
   public CollectedHeapName kind() {
-    return CollectedHeapName.CMS_HEAP;
+    return CollectedHeapName.CMS;
   }
 }
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java	Fri Apr 13 03:05:19 2018 +0200
@@ -125,7 +125,7 @@
     }
 
     public CollectedHeapName kind() {
-        return CollectedHeapName.G1_COLLECTED_HEAP;
+        return CollectedHeapName.G1;
     }
 
     @Override
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/parallel/ParallelScavengeHeap.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/parallel/ParallelScavengeHeap.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,7 +85,7 @@
    }
 
    public CollectedHeapName kind() {
-      return CollectedHeapName.PARALLEL_SCAVENGE_HEAP;
+      return CollectedHeapName.PARALLEL;
    }
 
    public void printOn(PrintStream tty) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/serial/SerialHeap.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/serial/SerialHeap.java	Fri Apr 13 03:05:19 2018 +0200
@@ -35,6 +35,6 @@
   }
 
   public CollectedHeapName kind() {
-    return CollectedHeapName.SERIAL_HEAP;
+    return CollectedHeapName.SERIAL;
   }
 }
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,11 +31,10 @@
 
   private CollectedHeapName(String name) { this.name = name; }
 
-  public static final CollectedHeapName GEN_COLLECTED_HEAP = new CollectedHeapName("GenCollectedHeap");
-  public static final CollectedHeapName CMS_HEAP = new CollectedHeapName("CMSHeap");
-  public static final CollectedHeapName SERIAL_HEAP = new CollectedHeapName("SerialHeap");
-  public static final CollectedHeapName G1_COLLECTED_HEAP = new CollectedHeapName("G1CollectedHeap");
-  public static final CollectedHeapName PARALLEL_SCAVENGE_HEAP = new CollectedHeapName("ParallelScavengeHeap");
+  public static final CollectedHeapName SERIAL = new CollectedHeapName("Serial");
+  public static final CollectedHeapName PARALLEL = new CollectedHeapName("Parallel");
+  public static final CollectedHeapName CMS = new CollectedHeapName("CMS");
+  public static final CollectedHeapName G1 = new CollectedHeapName("G1");
 
   public String toString() {
     return name;
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GenCollectedHeap.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GenCollectedHeap.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
 
-public class GenCollectedHeap extends CollectedHeap {
+abstract public class GenCollectedHeap extends CollectedHeap {
   private static AddressField youngGenField;
   private static AddressField oldGenField;
 
@@ -134,10 +134,6 @@
     }
   }
 
-  public CollectedHeapName kind() {
-    return CollectedHeapName.GEN_COLLECTED_HEAP;
-  }
-
   public void printOn(PrintStream tty) {
     for (int i = 0; i < nGens(); i++) {
       tty.print("Gen " + i + ": ");
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java	Fri Apr 13 03:05:19 2018 +0200
@@ -171,6 +171,8 @@
         SHA1,
         SHA2,
         CRC32,
+        LSE,
+        STXR_PREFETCH,
         A53MAC,
         DMB_ATOMICS
     }
@@ -183,7 +185,11 @@
     public enum Flag {
         UseBarriersForVolatile,
         UseCRC32,
-        UseNeon
+        UseNeon,
+        UseSIMDForMemoryOps,
+        AvoidUnalignedAccesses,
+        UseLSE,
+        UseBlockZeroing
     }
 
     private final EnumSet<Flag> flags;
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotJVMCIBackendFactory.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotJVMCIBackendFactory.java	Fri Apr 13 03:05:19 2018 +0200
@@ -46,11 +46,72 @@
     protected EnumSet<AArch64.CPUFeature> computeFeatures(@SuppressWarnings("unused") AArch64HotSpotVMConfig config) {
         // Configure the feature set using the HotSpot flag settings.
         EnumSet<AArch64.CPUFeature> features = EnumSet.noneOf(AArch64.CPUFeature.class);
+
+        if ((config.vmVersionFeatures & config.aarch64FP) != 0) {
+            features.add(AArch64.CPUFeature.FP);
+        }
+        if ((config.vmVersionFeatures & config.aarch64ASIMD) != 0) {
+            features.add(AArch64.CPUFeature.ASIMD);
+        }
+        if ((config.vmVersionFeatures & config.aarch64EVTSTRM) != 0) {
+            features.add(AArch64.CPUFeature.EVTSTRM);
+        }
+        if ((config.vmVersionFeatures & config.aarch64AES) != 0) {
+            features.add(AArch64.CPUFeature.AES);
+        }
+        if ((config.vmVersionFeatures & config.aarch64PMULL) != 0) {
+            features.add(AArch64.CPUFeature.PMULL);
+        }
+        if ((config.vmVersionFeatures & config.aarch64SHA1) != 0) {
+            features.add(AArch64.CPUFeature.SHA1);
+        }
+        if ((config.vmVersionFeatures & config.aarch64SHA2) != 0) {
+            features.add(AArch64.CPUFeature.SHA2);
+        }
+        if ((config.vmVersionFeatures & config.aarch64CRC32) != 0) {
+            features.add(AArch64.CPUFeature.CRC32);
+        }
+        if ((config.vmVersionFeatures & config.aarch64LSE) != 0) {
+            features.add(AArch64.CPUFeature.LSE);
+        }
+        if ((config.vmVersionFeatures & config.aarch64STXR_PREFETCH) != 0) {
+            features.add(AArch64.CPUFeature.STXR_PREFETCH);
+        }
+        if ((config.vmVersionFeatures & config.aarch64A53MAC) != 0) {
+            features.add(AArch64.CPUFeature.A53MAC);
+        }
+        if ((config.vmVersionFeatures & config.aarch64DMB_ATOMICS) != 0) {
+            features.add(AArch64.CPUFeature.DMB_ATOMICS);
+        }
+
         return features;
     }
 
     protected EnumSet<AArch64.Flag> computeFlags(@SuppressWarnings("unused") AArch64HotSpotVMConfig config) {
         EnumSet<AArch64.Flag> flags = EnumSet.noneOf(AArch64.Flag.class);
+
+        if (config.useBarriersForVolatile) {
+            flags.add(AArch64.Flag.UseBarriersForVolatile);
+        }
+        if (config.useCRC32) {
+            flags.add(AArch64.Flag.UseCRC32);
+        }
+        if (config.useNeon) {
+            flags.add(AArch64.Flag.UseNeon);
+        }
+        if (config.useSIMDForMemoryOps) {
+            flags.add(AArch64.Flag.UseSIMDForMemoryOps);
+        }
+        if (config.avoidUnalignedAccesses) {
+            flags.add(AArch64.Flag.AvoidUnalignedAccesses);
+        }
+        if (config.useLSE) {
+            flags.add(AArch64.Flag.UseLSE);
+        }
+        if (config.useBlockZeroing) {
+            flags.add(AArch64.Flag.UseBlockZeroing);
+        }
+
         return flags;
     }
 
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotVMConfig.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotVMConfig.java	Fri Apr 13 03:05:19 2018 +0200
@@ -39,4 +39,35 @@
     final boolean linuxOs = System.getProperty("os.name", "").startsWith("Linux");
 
     final boolean useCompressedOops = getFlag("UseCompressedOops", Boolean.class);
+
+    // CPU Capabilities
+
+    /*
+     * These flags are set based on the corresponding command line flags.
+     */
+    final boolean useBarriersForVolatile = getFlag("UseBarriersForVolatile", Boolean.class);
+    final boolean useCRC32 = getFlag("UseCRC32", Boolean.class);
+    final boolean useNeon = getFlag("UseNeon", Boolean.class);
+    final boolean useSIMDForMemoryOps = getFlag("UseSIMDForMemoryOps", Boolean.class);
+    final boolean avoidUnalignedAccesses = getFlag("AvoidUnalignedAccesses", Boolean.class);
+    final boolean useLSE = getFlag("UseLSE", Boolean.class);
+    final boolean useBlockZeroing = getFlag("UseBlockZeroing", Boolean.class);
+
+    final long vmVersionFeatures = getFieldValue("Abstract_VM_Version::_features", Long.class, "uint64_t");
+
+    /*
+     * These flags are set if the corresponding support is in the hardware.
+     */
+    final long aarch64FP = getConstant("VM_Version::CPU_FP", Long.class);
+    final long aarch64ASIMD = getConstant("VM_Version::CPU_ASIMD", Long.class);
+    final long aarch64EVTSTRM = getConstant("VM_Version::CPU_EVTSTRM", Long.class);
+    final long aarch64AES = getConstant("VM_Version::CPU_AES", Long.class);
+    final long aarch64PMULL = getConstant("VM_Version::CPU_PMULL", Long.class);
+    final long aarch64SHA1 = getConstant("VM_Version::CPU_SHA1", Long.class);
+    final long aarch64SHA2 = getConstant("VM_Version::CPU_SHA2", Long.class);
+    final long aarch64CRC32 = getConstant("VM_Version::CPU_CRC32", Long.class);
+    final long aarch64LSE = getConstant("VM_Version::CPU_LSE", Long.class);
+    final long aarch64STXR_PREFETCH = getConstant("VM_Version::CPU_STXR_PREFETCH", Long.class);
+    final long aarch64A53MAC = getConstant("VM_Version::CPU_A53MAC", Long.class);
+    final long aarch64DMB_ATOMICS = getConstant("VM_Version::CPU_DMB_ATOMICS", Long.class);
 }
--- a/src/jdk.jdi/share/native/libdt_shmem/shmemBase.c	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/jdk.jdi/share/native/libdt_shmem/shmemBase.c	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -404,25 +404,25 @@
 createStream(char *name, Stream *stream)
 {
     jint error;
-    char prefix[MAX_IPC_PREFIX];
+    char objectName[MAX_IPC_NAME];
 
-    sprintf(prefix, "%s.mutex", name);
-    error = createWithGeneratedName(prefix, stream->shared->mutexName,
+    sprintf(objectName, "%s.mutex", name);
+    error = createWithGeneratedName(objectName, stream->shared->mutexName,
                                     createMutex, &stream->mutex);
     if (error != SYS_OK) {
         return error;
     }
 
-    sprintf(prefix, "%s.hasData", name);
-    error = createWithGeneratedName(prefix, stream->shared->hasDataEventName,
+    sprintf(objectName, "%s.hasData", name);
+    error = createWithGeneratedName(objectName, stream->shared->hasDataEventName,
                                     createEvent, &stream->hasData);
     if (error != SYS_OK) {
         (void)closeStream(stream, JNI_FALSE);
         return error;
     }
 
-    sprintf(prefix, "%s.hasSpace", name);
-    error = createWithGeneratedName(prefix, stream->shared->hasSpaceEventName,
+    sprintf(objectName, "%s.hasSpace", name);
+    error = createWithGeneratedName(objectName, stream->shared->hasSpaceEventName,
                                     createEvent, &stream->hasSpace);
     if (error != SYS_OK) {
         (void)closeStream(stream, JNI_FALSE);
@@ -598,7 +598,7 @@
                  SharedMemoryConnection **connectionPtr)
 {
     jint error;
-    char streamPrefix[MAX_IPC_NAME];
+    char streamName[MAX_IPC_NAME];
 
     SharedMemoryConnection *connection = allocConnection();
     if (connection == NULL) {
@@ -619,17 +619,17 @@
     connection->incoming.shared = &connection->shared->toServer;
     connection->outgoing.shared = &connection->shared->toClient;
 
-    strcpy(streamPrefix, connection->name);
-    strcat(streamPrefix, ".ctos");
-    error = createStream(streamPrefix, &connection->incoming);
+    strcpy(streamName, connection->name);
+    strcat(streamName, ".ctos");
+    error = createStream(streamName, &connection->incoming);
     if (error != SYS_OK) {
         closeConnection(connection);
         return error;
     }
 
-    strcpy(streamPrefix, connection->name);
-    strcat(streamPrefix, ".stoc");
-    error = createStream(streamPrefix, &connection->outgoing);
+    strcpy(streamName, connection->name);
+    strcat(streamName, ".stoc");
+    error = createStream(streamName, &connection->outgoing);
     if (error != SYS_OK) {
         closeConnection(connection);
         return error;
@@ -746,9 +746,7 @@
 {
     SharedMemoryTransport *transport;
     jint error;
-    char prefix[MAX_IPC_PREFIX];
-
-
+    char objectName[MAX_IPC_NAME];
 
     transport = allocTransport();
     if (transport == NULL) {
@@ -784,24 +782,24 @@
     memset(transport->shared, 0, sizeof(SharedListener));
     transport->shared->acceptingPID = sysProcessGetID();
 
-    sprintf(prefix, "%s.mutex", transport->name);
-    error = createWithGeneratedName(prefix, transport->shared->mutexName,
+    sprintf(objectName, "%s.mutex", transport->name);
+    error = createWithGeneratedName(objectName, transport->shared->mutexName,
                                     createMutex, &transport->mutex);
     if (error != SYS_OK) {
         closeTransport(transport);
         return error;
     }
 
-    sprintf(prefix, "%s.accept", transport->name);
-    error = createWithGeneratedName(prefix, transport->shared->acceptEventName,
+    sprintf(objectName, "%s.accept", transport->name);
+    error = createWithGeneratedName(objectName, transport->shared->acceptEventName,
                                     createEvent, &transport->acceptEvent);
     if (error != SYS_OK) {
         closeTransport(transport);
         return error;
     }
 
-    sprintf(prefix, "%s.attach", transport->name);
-    error = createWithGeneratedName(prefix, transport->shared->attachEventName,
+    sprintf(objectName, "%s.attach", transport->name);
+    error = createWithGeneratedName(objectName, transport->shared->attachEventName,
                                     createEvent, &transport->attachEvent);
     if (error != SYS_OK) {
         closeTransport(transport);
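
The renames above also correct the buffer sizing: each sprintf target receives a base name plus a suffix such as ".hasSpace", so it must be dimensioned as a full object name, not as a prefix. A standalone sketch with illustrative sizes (the actual constants are defined in the shared-memory transport headers, not here):

```cpp
#include <stdio.h>

#define MAX_IPC_PREFIX 50   // illustrative values only, chosen to show the
#define MAX_IPC_NAME   64   // prefix/name distinction

int main() {
  const char name[] = "javatool123456789.ctos";   // stream base name
  char objectName[MAX_IPC_NAME];                  // sized for name + suffix

  // A MAX_IPC_PREFIX-sized buffer could overflow once ".hasSpace" is
  // appended; bounding the write by the full-name size keeps it safe.
  snprintf(objectName, sizeof(objectName), "%s.hasSpace", name);
  printf("%s\n", objectName);
  return 0;
}
```
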
--- a/src/linux/doc/man/java.1	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/linux/doc/man/java.1	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 '\" t
-.\" Copyright (c) 1994, 2015, Oracle and/or its affiliates. All rights reserved.
+.\" Copyright (c) 1994, 2018, Oracle and/or its affiliates. All rights reserved.
 .\" DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 .\"
 .\" This code is free software; you can redistribute it and/or modify it
@@ -1173,65 +1173,6 @@
 .PP
 These options control the runtime behavior of the Java HotSpot VM\&.
 .PP
-\-XX:+CheckEndorsedAndExtDirs
-.RS 4
-Enables the option to prevent the
-\fBjava\fR
-command from running a Java application if it uses the endorsed\-standards override mechanism or the extension mechanism\&. This option checks if an application is using one of these mechanisms by checking the following:
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBjava\&.ext\&.dirs\fR
-or
-\fBjava\&.endorsed\&.dirs\fR
-system property is set\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/endorsed\fR
-directory exists and is not empty\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/ext\fR
-directory contains any JAR files other than those of the JDK\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The system\-wide platform\-specific extension directory contains any JAR files\&.
-.RE
-.RE
-.PP
 \-XX:+DisableAttachMechanism
 .RS 4
 Enables the option that disables the mechanism that lets tools attach to the JVM\&. By default, this option is disabled, meaning that the attach mechanism is enabled and you can use tools such as
--- a/src/solaris/doc/sun/man/man1/java.1	Fri Apr 13 09:06:37 2018 +0800
+++ b/src/solaris/doc/sun/man/man1/java.1	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 '\" t
-.\" Copyright (c) 1994, 2015, Oracle and/or its affiliates. All rights reserved.
+.\" Copyright (c) 1994, 2018, Oracle and/or its affiliates. All rights reserved.
 .\" DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 .\"
 .\" This code is free software; you can redistribute it and/or modify it
@@ -1173,65 +1173,6 @@
 .PP
 These options control the runtime behavior of the Java HotSpot VM\&.
 .PP
-\-XX:+CheckEndorsedAndExtDirs
-.RS 4
-Enables the option to prevent the
-\fBjava\fR
-command from running a Java application if it uses the endorsed\-standards override mechanism or the extension mechanism\&. This option checks if an application is using one of these mechanisms by checking the following:
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBjava\&.ext\&.dirs\fR
-or
-\fBjava\&.endorsed\&.dirs\fR
-system property is set\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/endorsed\fR
-directory exists and is not empty\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/ext\fR
-directory contains any JAR files other than those of the JDK\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The system\-wide platform\-specific extension directory contains any JAR files\&.
-.RE
-.RE
-.PP
 \-XX:+DisableAttachMechanism
 .RS 4
 Enables the option that disables the mechanism that lets tools attach to the JVM\&. By default, this option is disabled, meaning that the attach mechanism is enabled and you can use tools such as
--- a/test/fmw/gtest/src/gtest.cc	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/fmw/gtest/src/gtest.cc	Fri Apr 13 03:05:19 2018 +0200
@@ -49,6 +49,9 @@
 #include <ostream>  // NOLINT
 #include <sstream>
 #include <vector>
+#if defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140
+#pragma error_messages(off, SEC_NULL_PTR_DEREF)
+#endif
 
 #if GTEST_OS_LINUX
 
--- a/test/hotspot/gtest/gc/g1/test_g1HeapVerifier.cpp	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/gtest/gc/g1/test_g1HeapVerifier.cpp	Fri Apr 13 03:05:19 2018 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/g1/g1Arguments.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "logging/logConfiguration.hpp"
 #include "logging/logTestFixture.hpp"
@@ -32,50 +33,48 @@
 };
 
 TEST_F(G1HeapVerifierTest, parse) {
-  G1HeapVerifier verifier(NULL);
-
   LogConfiguration::configure_stdout(LogLevel::Off, true, LOG_TAGS(gc, verify));
 
   // Default is to verify everything.
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyAll));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyYoungOnly));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyInitialMark));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyMixed));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyRemark));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyCleanup));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyFull));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyAll));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyYoungOnly));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyInitialMark));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyMixed));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyRemark));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyCleanup));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyFull));
 
   // Setting one will disable all others.
-  verifier.parse_verification_type("full");
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyAll));
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyYoungOnly));
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyInitialMark));
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyMixed));
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyRemark));
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyCleanup));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyFull));
+  G1Arguments::parse_verification_type("full");
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyAll));
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyYoungOnly));
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyInitialMark));
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyMixed));
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyRemark));
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyCleanup));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyFull));
 
   // Verify case sensitivity.
-  verifier.parse_verification_type("YOUNG-ONLY");
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyYoungOnly));
-  verifier.parse_verification_type("young-only");
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyYoungOnly));
+  G1Arguments::parse_verification_type("YOUNG-ONLY");
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyYoungOnly));
+  G1Arguments::parse_verification_type("young-only");
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyYoungOnly));
 
   // Verify perfect match
-  verifier.parse_verification_type("mixedgc");
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyMixed));
-  verifier.parse_verification_type("mixe");
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyMixed));
-  verifier.parse_verification_type("mixed");
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyMixed));
+  G1Arguments::parse_verification_type("mixedgc");
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyMixed));
+  G1Arguments::parse_verification_type("mixe");
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyMixed));
+  G1Arguments::parse_verification_type("mixed");
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyMixed));
 
   // Verify the last three
-  verifier.parse_verification_type("initial-mark");
-  verifier.parse_verification_type("remark");
-  verifier.parse_verification_type("cleanup");
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyRemark));
-  ASSERT_TRUE(verifier.should_verify(G1HeapVerifier::G1VerifyCleanup));
+  G1Arguments::parse_verification_type("initial-mark");
+  G1Arguments::parse_verification_type("remark");
+  G1Arguments::parse_verification_type("cleanup");
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyRemark));
+  ASSERT_TRUE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyCleanup));
 
   // Enabling all is not the same as G1VerifyAll
-  ASSERT_FALSE(verifier.should_verify(G1HeapVerifier::G1VerifyAll));
+  ASSERT_FALSE(G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyAll));
 }
--- a/test/hotspot/jtreg/ProblemList-graal.txt	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/ProblemList-graal.txt	Fri Apr 13 03:05:19 2018 +0200
@@ -47,10 +47,6 @@
 compiler/jvmci/TestValidateModules.java                         8194942   generic-all
 gc/arguments/TestVerifyBeforeAndAfterGCFlags.java               8194942   generic-all
 
-compiler/rangechecks/TestRangeCheckSmearing.java                8195632   generic-all
-compiler/uncommontrap/Test8009761.java                          8195632   generic-all
-compiler/whitebox/ForceNMethodSweepTest.java                    8195632   generic-all
-
 compiler/unsafe/UnsafeGetConstantField.java                     8181833   generic-all
 compiler/unsafe/UnsafeGetStableArrayElement.java                8181833   generic-all
 compiler/unsafe/UnsafeOffHeapBooleanTest.java                   8181833   generic-all
@@ -70,12 +66,10 @@
 
 gc/TestNUMAPageSize.java                                        8194949   generic-all
 
-runtime/appcds/UseAppCDS.java                                   8196626   generic-all
-
 runtime/ReservedStack/ReservedStackTestCompiler.java            8181855   generic-all
 
 serviceability/jvmti/GetModulesInfo/JvmtiGetAllModulesTest.java 8195156   generic-all
 
-compiler/compilercontrol/directives/LogTest.java                8197446   generic-all
+compiler/compilercontrol/directives/LogTest.java                8181753   generic-all
 
 gc/g1/ihop/TestIHOPStatic.java                                  8199486   generic-all
--- a/test/hotspot/jtreg/ProblemList.txt	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/ProblemList.txt	Fri Apr 13 03:05:19 2018 +0200
@@ -62,7 +62,6 @@
 gc/survivorAlignment/TestPromotionToSurvivor.java 8129886 generic-all
 gc/g1/logging/TestG1LoggingFailure.java 8169634 generic-all
 gc/g1/humongousObjects/TestHeapCounters.java 8178918 generic-all
-gc/g1/TestVerifyGCType.java 8193067 generic-all
 gc/stress/gclocker/TestGCLockerWithParallel.java 8180622 generic-all
 gc/stress/gclocker/TestGCLockerWithG1.java 8180622 generic-all
 gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java 8177765 generic-all
--- a/test/hotspot/jtreg/TEST.groups	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/TEST.groups	Fri Apr 13 03:05:19 2018 +0200
@@ -143,14 +143,15 @@
   :tier1_gc_1 \
   :tier1_gc_2 \
   :tier1_gc_gcold \
-  :tier1_gc_gcbasher 
+  :tier1_gc_gcbasher
 
 hotspot_not_fast_gc = \
   :hotspot_gc \
   -:tier1_gc
 
 tier1_gc_1 = \
-  gc/g1/
+  gc/g1/ \
+  -gc/g1/ihop/TestIHOPErgo.java
 
 tier1_gc_2 = \
   sanity/ExecuteInternalVMTests.java \
@@ -219,7 +220,8 @@
  -runtime/containers/ \
   sanity/ \
   testlibrary_tests/TestMutuallyExclusivePlatformPredicates.java \
- -:tier1_runtime_appcds_exclude
+ -:tier1_runtime_appcds_exclude \
+ -runtime/signal
 
 hotspot_cds = \
   runtime/SharedArchiveFile/ \
@@ -243,7 +245,9 @@
 tier1_serviceability = \
   serviceability/dcmd/compiler \
   serviceability/logging \
-  serviceability/sa
+  serviceability/sa \
+  -serviceability/sa/ClhsdbScanOops.java \
+  -serviceability/sa/TestHeapDumpForLargeArray.java
 
 tier1 = \
   :tier1_common \
@@ -260,7 +264,8 @@
  -runtime/containers/ \
  -:tier1_runtime \
  -:tier1_serviceability \
- -:hotspot_tier2_runtime_platform_agnostic
+ -:hotspot_tier2_runtime_platform_agnostic \
+ -runtime/signal
 
 hotspot_tier2_runtime_platform_agnostic = \
   runtime/SelectionResolution \
@@ -289,4 +294,3 @@
   -:tier1_runtime_appcds_exclude \
   -:hotspot_nmt \
   -:hotspot_tier2_runtime_platform_agnostic
-
--- a/test/hotspot/jtreg/compiler/intrinsics/string/TestStringIntrinsicRangeChecks.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/compiler/intrinsics/string/TestStringIntrinsicRangeChecks.java	Fri Apr 13 03:05:19 2018 +0200
@@ -29,7 +29,7 @@
  * @summary Verifies that string intrinsics throw array out of bounds exceptions.
  * @library /compiler/patches /test/lib
  * @build java.base/java.lang.Helper
- * @run main/othervm -Xbatch -XX:CompileThreshold=100 -XX:-TieredCompilation compiler.intrinsics.string.TestStringIntrinsicRangeChecks
+ * @run main/othervm -Xbatch -XX:CompileThreshold=100 compiler.intrinsics.string.TestStringIntrinsicRangeChecks
  */
 package compiler.intrinsics.string;
 
--- a/test/hotspot/jtreg/compiler/types/TestMeetIncompatibleInterfaceArrays.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/compiler/types/TestMeetIncompatibleInterfaceArrays.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,10 @@
  * @test
  * @bug 8141551
  * @summary C2 can not handle returns with incompatible interface arrays
+ * @requires vm.compMode == "Xmixed" & vm.flavor == "server"
  * @modules java.base/jdk.internal.org.objectweb.asm
  *          java.base/jdk.internal.misc
- * @library /test/lib
+ * @library /test/lib /
  *
  * @build sun.hotspot.WhiteBox
  * @run driver ClassFileInstaller sun.hotspot.WhiteBox
@@ -37,8 +38,8 @@
  *        -XX:+UnlockDiagnosticVMOptions
  *        -XX:+WhiteBoxAPI
  *        -Xbatch
- *        -XX:CompileThreshold=1
  *        -XX:-TieredCompilation
+ *        -XX:TieredStopAtLevel=4
  *        -XX:CICompilerCount=1
  *        -XX:+PrintCompilation
  *        -XX:+PrintInlining
@@ -51,8 +52,8 @@
  *        -XX:+UnlockDiagnosticVMOptions
  *        -XX:+WhiteBoxAPI
  *        -Xbatch
- *        -XX:CompileThreshold=1
  *        -XX:-TieredCompilation
+ *        -XX:TieredStopAtLevel=4
  *        -XX:CICompilerCount=1
  *        -XX:+PrintCompilation
  *        -XX:+PrintInlining
@@ -65,11 +66,8 @@
  *        -XX:+UnlockDiagnosticVMOptions
  *        -XX:+WhiteBoxAPI
  *        -Xbatch
- *        -XX:CompileThreshold=1
- *        -XX:Tier0InvokeNotifyFreqLog=0 -XX:Tier2InvokeNotifyFreqLog=0 -XX:Tier3InvokeNotifyFreqLog=0 -XX:Tier23InlineeNotifyFreqLog=0
- *        -XX:Tier3InvocationThreshold=2 -XX:Tier3MinInvocationThreshold=2 -XX:Tier3CompileThreshold=2
- *        -XX:Tier4InvocationThreshold=1 -XX:Tier4MinInvocationThreshold=1 -XX:Tier4CompileThreshold=1
  *        -XX:+TieredCompilation
+ *        -XX:TieredStopAtLevel=4
  *        -XX:CICompilerCount=2
  *        -XX:+PrintCompilation
  *        -XX:+PrintInlining
@@ -84,6 +82,7 @@
 
 package compiler.types;
 
+import compiler.whitebox.CompilerWhiteBoxTest;
 import jdk.internal.org.objectweb.asm.ClassWriter;
 import jdk.internal.org.objectweb.asm.MethodVisitor;
 import sun.hotspot.WhiteBox;
@@ -190,8 +189,8 @@
      *     return Helper.createI2Array3(); // returns I1[][][] which gives a verifier error because return expects I1[][][][]
      *   }
      *   public static void test() {
-     *     I1[][][][][] i1 = run();
-     *     System.out.println(i1[0][0][0][0][0].getName());
+     *     I1[][][][] i1 = run();
+     *     System.out.println(i1[0][0][0][0].getName());
      *   }
      * ...
      * public class MeetIncompatibleInterfaceArrays5ASM {
@@ -306,9 +305,25 @@
 
     }
 
-    public static String[][] tier = { { "interpreted", "C2 (tier 4) without inlining", "C2 (tier4) without inlining" },
-            { "interpreted", "C2 (tier 4) with inlining", "C2 (tier4) with inlining" },
-            { "interpreted", "C1 (tier 3) with inlining", "C2 (tier4) with inlining" } };
+    public static String[][] tier = { { "interpreted (tier 0)",
+                                        "C2 (tier 4) without inlining",
+                                        "C2 (tier 4) without inlining" },
+                                      { "interpreted (tier 0)",
+                                        "C2 (tier 4) with inlining",
+                                        "C2 (tier 4) with inlining" },
+                                      { "interpreted (tier 0)",
+                                        "C1 (tier 3) with inlining",
+                                        "C2 (tier 4) with inlining" } };
+
+    public static int[][] level = { { CompilerWhiteBoxTest.COMP_LEVEL_NONE,
+                                      CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION,
+                                      CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION },
+                                    { CompilerWhiteBoxTest.COMP_LEVEL_NONE,
+                                      CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION,
+                                      CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION },
+                                    { CompilerWhiteBoxTest.COMP_LEVEL_NONE,
+                                      CompilerWhiteBoxTest.COMP_LEVEL_FULL_PROFILE,
+                                      CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION } };
 
     public static void main(String[] args) throws Exception {
         final int pass = Integer.parseInt(args.length > 0 ? args[0] : "0");
@@ -344,8 +359,11 @@
                 Method m = c.getMethod("test");
                 Method r = c.getMethod("run");
                 for (int j = 0; j < 3; j++) {
-                    System.out.println((j + 1) + ". invokation of " + baseClassName + i + "ASM.test() [should be "
-                            + tier[pass][j] + "]");
+                    System.out.println((j + 1) + ". invocation of " + baseClassName + i + "ASM.test() [::" +
+                                       r.getName() + "() should be '" + tier[pass][j] + "' compiled]");
+
+                    WB.enqueueMethodForCompilation(r, level[pass][j]);
+
                     try {
                         m.invoke(null);
                     } catch (InvocationTargetException ite) {
@@ -360,10 +378,17 @@
                             }
                         }
                     }
-                }
-                System.out.println("Method " + r + (WB.isMethodCompiled(r) ? " has" : " has not") + " been compiled.");
-                if (!WB.isMethodCompiled(r)) {
-                    throw new Exception("Method " + r + " must be compiled!");
+
+                    int r_comp_level = WB.getMethodCompilationLevel(r);
+                    System.out.println("   invocation of " + baseClassName + i + "ASM.test() [::" +
+                                       r.getName() + "() was compiled at tier " + r_comp_level + "]");
+
+                    if (r_comp_level != level[pass][j]) {
+                      throw new Exception("Method " + r + " must be compiled at tier " + level[pass][j] +
+                                          " but was compiled at " + r_comp_level + " instead!");
+                    }
+
+                    WB.deoptimizeMethod(r);
                 }
             }
         }
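
The rewrite above drops the brittle -XX:CompileThreshold / Tier*Threshold tuning and instead drives compilation explicitly through the WhiteBox API: each iteration enqueues run() at the expected tier, checks the achieved compilation level, and deoptimizes before the next round. A condensed sketch of that pattern, assuming the usual WhiteBox setup from the @run lines above (with -Xbatch the enqueue completes before the check):

    import java.lang.reflect.Method;
    import sun.hotspot.WhiteBox;

    public class CompileAtLevelSketch {
        static final WhiteBox WB = WhiteBox.getWhiteBox();

        // Force m to a given tier, verify it landed there, then undo so the
        // next pass starts from the interpreter again (mirrors the loop above).
        static void compileAndCheck(Method m, int level) throws Exception {
            WB.enqueueMethodForCompilation(m, level);
            int actual = WB.getMethodCompilationLevel(m);
            if (actual != level) {
                throw new Exception(m + " compiled at tier " + actual +
                                    ", expected tier " + level);
            }
            WB.deoptimizeMethod(m);
        }
    }
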
--- a/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,9 +53,7 @@
         {"MARK_FROM_ROOTS", "Concurrent Mark From Roots"},
         {"BEFORE_REMARK", null},
         {"REMARK", "Pause Remark"},
-        {"CREATE_LIVE_DATA", "Concurrent Create Live Data"},
-        // "COMPLETE_CLEANUP",  -- optional phase, not reached by this test
-        {"CLEANUP_FOR_NEXT_MARK", "Concurrent Cleanup for Next Mark"},
+        {"REBUILD_REMEMBERED_SETS", "Concurrent Rebuild Remembered Sets"},
         // Clear request
         {"IDLE", null},
         {"ANY", null},
--- a/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1Basics.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1Basics.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,8 +53,7 @@
         "MARK_FROM_ROOTS",
         "BEFORE_REMARK",
         "REMARK",
-        "CREATE_LIVE_DATA",
-        "COMPLETE_CLEANUP",
+        "REBUILD_REMEMBERED_SETS",
         "CLEANUP_FOR_NEXT_MARK",
     };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/g1/TestFromCardCacheIndex.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,120 @@
+/*
+ * @test TestFromCardCacheIndex.java
+ * @bug 8196485
+ * @summary Ensure that G1 does not miss a remembered set entry due to from card cache default value indices.
+ * @key gc
+ * @requires vm.gc.G1
+ * @requires vm.debug
+ * @requires vm.bits != "32"
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. -Xms20M -Xmx20M -XX:+UseCompressedOops -XX:G1HeapRegionSize=1M -XX:HeapBaseMinAddress=2199011721216 -XX:+UseG1GC -verbose:gc TestFromCardCacheIndex
+ */
+
+import sun.hotspot.WhiteBox;
+
+/**
+ * Repeatedly tries to generate references from objects that contain a card with the same index
+ * as the from card cache default value.
+ */
+public class TestFromCardCacheIndex {
+    private static WhiteBox WB;
+
+    // Shift value to calculate card indices from addresses.
+    private static final int CardSizeShift = 9;
+
+    /**
+     * Returns the last address on the heap within the object.
+     *
+     * @param o The object array to get the last address from.
+     */
+    private static long getObjectLastAddress(Object[] o) {
+        return WB.getObjectAddress(o) + WB.getObjectSize(o) - 1;
+    }
+
+    /**
+     * Returns the (truncated) 32-bit card index for the given address.
+     *
+     * @param address The address to get the 32-bit card index from.
+     */
+    private static int getCardIndex32bit(long address) {
+        return (int)(address >> CardSizeShift);
+    }
+
+    // The source arrays that are placed on the heap in old gen.
+    private static int numArrays = 7000;
+    private static int arraySize = 508;
+    // Size of a humongous byte array, a bit less than a 1M region. This makes sure
+    // that we always create a cross-region reference when referencing it.
+    private static int byteArraySize = 1024*1023;
+
+    public static void main(String[] args) {
+        WB = sun.hotspot.WhiteBox.getWhiteBox();
+        for (int i = 0; i < 5; i++) {
+          runTest();
+          WB.fullGC();
+        }
+    }
+
+    public static void runTest() {
+        System.out.println("Starting test");
+
+        // Spray the heap with random object arrays in the hope that we get one
+        // at the proper place.
+        Object[][] arrays = new Object[numArrays][];
+        for (int i = 0; i < numArrays; i++) {
+            arrays[i] = new Object[arraySize];
+        }
+
+        // Make sure that everything is in old gen.
+        WB.fullGC();
+
+        // Find if we got an allocation at the right spot.
+        Object[] arrayWithCardMinus1 = findArray(arrays);
+
+        if (arrayWithCardMinus1 == null) {
+            System.out.println("Array with card -1 not found. Trying again.");
+            return;
+        } else {
+            System.out.println("Array with card -1 found.");
+        }
+
+        System.out.println("Modifying the last card in the array with a new object in a different region...");
+        // Create a target object that is guaranteed to be in a different region.
+        byte[] target = new byte[byteArraySize];
+
+        // Modify the last entry of the object we found.
+        arrayWithCardMinus1[arraySize - 1] = target;
+
+        target = null;
+        // Make sure that the dirty cards are flushed by doing a GC.
+        System.out.println("Doing a GC.");
+        WB.youngGC();
+
+        System.out.println("The crash didn't reproduce. Trying again.");
+    }
+
+    /**
+     * Finds and returns an array that contains a (32-bit truncated) card with value -1.
+     */
+    private static Object[] findArray(Object[][] arrays) {
+        for (int i = 0; i < arrays.length; i++) {
+            Object[] target = arrays[i];
+            if (target == null) {
+                continue;
+            }
+            final long startAddress = WB.getObjectAddress(target);
+            final long lastAddress = getObjectLastAddress(target);
+            final int card = getCardIndex32bit(lastAddress);
+            if (card == -1) {
+                return target;
+            }
+        }
+        return null;
+    }
+}
+
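
What the new test leans on: with 512-byte cards, a card index is the address shifted right by 9, and the from card cache treats -1 as its "empty" default, so an object whose last card index truncates to -1 in 32 bits can be mistaken for "no card recorded". The -XX:HeapBaseMinAddress=2199011721216 value (11 MiB below 2^41) places the 20 MB heap across the boundary where such addresses occur. A small self-contained illustration of the truncation (the address is chosen by us for demonstration, not taken from the test):

    public class CardIndexDemo {
        private static final int CARD_SIZE_SHIFT = 9; // 512-byte cards, as in the test

        public static void main(String[] args) {
            // Any address in the last 512 bytes below 2^41 has a card index
            // whose low 32 bits are all ones, so (int) truncation yields -1.
            long address = (1L << 41) - 256;
            long fullIndex = address >> CARD_SIZE_SHIFT;   // 0xFFFFFFFF
            int truncated = (int) fullIndex;               // -1, the cache's default value
            System.out.println(fullIndex);  // 4294967295
            System.out.println(truncated);  // -1
        }
    }
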
--- a/test/hotspot/jtreg/gc/g1/TestVerifyGCType.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/gc/g1/TestVerifyGCType.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,6 @@
         testFullAndRemark();
         testConcurrentMark();
         testBadVerificationType();
-        testUnsupportedCollector();
     }
 
     private static void testAllVerificationEnabled() throws Exception {
@@ -127,14 +126,6 @@
         verifyCollection("Pause Full", true, true, true, output.getStdout());
     }
 
-    private static void testUnsupportedCollector() throws Exception {
-        OutputAnalyzer output;
-        // Test bad gc
-        output = testWithBadGC();
-        output.shouldHaveExitValue(0);
-        output.shouldMatch("VerifyGCType is not supported by this collector.");
-    }
-
     private static OutputAnalyzer testWithVerificationType(String[] types) throws Exception {
         ArrayList<String> basicOpts = new ArrayList<>();
         Collections.addAll(basicOpts, new String[] {
@@ -145,6 +136,8 @@
                                        "-Xlog:gc,gc+start,gc+verify=info",
                                        "-Xms16m",
                                        "-Xmx16m",
+                                       "-XX:ParallelGCThreads=1",
+                                       "-XX:G1HeapWastePercent=1",
                                        "-XX:+VerifyBeforeGC",
                                        "-XX:+VerifyAfterGC",
                                        "-XX:+VerifyDuringGC"});
@@ -161,17 +154,6 @@
         return analyzer;
     }
 
-    private static OutputAnalyzer testWithBadGC() throws Exception {
-        ProcessBuilder procBuilder =  ProcessTools.createJavaProcessBuilder(new String[] {
-                "-XX:+UseParallelGC",
-                "-XX:+UnlockDiagnosticVMOptions",
-                "-XX:VerifyGCType=full",
-                "-version"});
-
-        OutputAnalyzer analyzer = new OutputAnalyzer(procBuilder.start());
-        return analyzer;
-    }
-
     private static void verifyCollection(String name, boolean expectBefore, boolean expectDuring, boolean expectAfter, String data) {
         CollectionInfo ci = CollectionInfo.parseFirst(name, data);
         Asserts.assertTrue(ci != null, "Expected GC not found: " + name + "\n" + data);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/ErrorHandling/ShowRegistersOnAssertTest.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @bug 8191101
+ * @summary Show Registers on assert/guarantee
+ * @library /test/lib
+ * @requires (vm.debug == true) & (os.family == "linux")
+ * @author Thomas Stuefe (SAP)
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ */
+
+// Note: this test can only run on debug since it relies on VMError::controlled_crash() which
+// only exists in debug builds.
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStreamReader;
+import java.util.regex.Pattern;
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.Platform;
+import jdk.test.lib.process.ProcessTools;
+
+public class ShowRegistersOnAssertTest {
+
+    private static void do_test(boolean do_assert, // true - assert, false - guarantee
+        boolean suppress_assert,
+        boolean show_registers_on_assert) throws Exception
+    {
+        System.out.println("Testing " + (suppress_assert ? "suppressed" : "normal") + " " + (do_assert ? "assert" : "guarantee") +
+                           " with " + (show_registers_on_assert ? "-XX:+ShowRegistersOnAssert" : "-XX:-ShowRegistersOnAssert") + "...");
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions", "-Xmx100M", "-XX:-CreateCoredumpOnCrash",
+            "-XX:ErrorHandlerTest=" + (do_assert ? "1" : "3"),
+            (suppress_assert ? "-XX:SuppressErrorAt=/vmError.cpp" : ""),
+            (show_registers_on_assert ? "-XX:+ShowRegistersOnAssert" : "-XX:-ShowRegistersOnAssert"),
+            "-version");
+
+        OutputAnalyzer output_detail = new OutputAnalyzer(pb.start());
+
+        if (suppress_assert) {
+            // we should not have crashed. See VMError::controlled_crash().
+            output_detail.shouldMatch(".*survived intentional crash.*");
+        } else {
+            // we should have crashed with an internal error. We should definitely NOT have crashed with a segfault
+            // (which would be a sign that the assert poison page mechanism does not work).
+            output_detail.shouldMatch("# A fatal error has been detected by the Java Runtime Environment:.*");
+            output_detail.shouldMatch("# +Internal Error.*");
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        // Note: for now, this is only a regression test verifying that the addition of ShowRegistersOnAssert does
+        // not break normal assert/guarantee handling. The feature is not implemented on all platforms and really testing
+        // it requires more effort.
+        do_test(false, false, false);
+        do_test(false, false, true);
+        do_test(false, true, false);
+        do_test(false, true, true);
+        do_test(true, false, false);
+        do_test(true, false, true);
+        do_test(true, true, false);
+        do_test(true, true, true);
+    }
+
+}
+
--- a/test/hotspot/jtreg/runtime/SharedArchiveFile/MaxMetaspaceSize.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/runtime/SharedArchiveFile/MaxMetaspaceSize.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
 /**
  * @test
  * @requires vm.cds
- * @bug 8067187
+ * @bug 8067187 8200078
  * @summary Testing CDS dumping with the -XX:MaxMetaspaceSize=<size> option
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
@@ -50,7 +50,7 @@
       processArgs.add("-XX:MaxMetaspaceSize=1m");
     }
 
-    String msg = "OutOfMemoryError: Metaspace";
+    String msg = "Failed allocating metaspace object";
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(processArgs.toArray(new String[0]));
     CDSTestUtils.executeAndLog(pb, "dump").shouldContain(msg).shouldHaveExitValue(1);
   }
--- a/test/hotspot/jtreg/runtime/appcds/GraalWithLimitedMetaspace.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/runtime/appcds/GraalWithLimitedMetaspace.java	Fri Apr 13 03:05:19 2018 +0200
@@ -125,8 +125,14 @@
             "-XX:MetaspaceSize=12M",
             "-XX:MaxMetaspaceSize=12M"));
 
-        OutputAnalyzer output = TestCommon.executeAndLog(pb, "dump-archive")
-            .shouldHaveExitValue(1)
-            .shouldContain("Failed allocating metaspace object type");
+        OutputAnalyzer output = TestCommon.executeAndLog(pb, "dump-archive");
+        int exitValue = output.getExitValue();
+        if (exitValue == 1) {
+            output.shouldContain("Failed allocating metaspace object type");
+        } else if (exitValue == 0) {
+            output.shouldContain("Loading classes to share");
+        } else {
+            throw new RuntimeException("Unexpected exit value " + exitValue);
+        }
     }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/defineAnonClass/UnsafeDefMeths.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8200261
+ * @summary Tests an anonymous class that implements interfaces with default methods.
+ * @library /testlibrary
+ * @modules java.base/jdk.internal.org.objectweb.asm
+ *          java.management
+ * @compile -XDignore.symbol.file=true UnsafeDefMeths.java
+ * @run main UnsafeDefMeths
+ */
+
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.Type;
+import sun.misc.Unsafe;
+
+import java.lang.invoke.MethodType;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static jdk.internal.org.objectweb.asm.Opcodes.ACC_PRIVATE;
+import static jdk.internal.org.objectweb.asm.Opcodes.ACC_PUBLIC;
+import static jdk.internal.org.objectweb.asm.Opcodes.ACC_SUPER;
+import static jdk.internal.org.objectweb.asm.Opcodes.ALOAD;
+import static jdk.internal.org.objectweb.asm.Opcodes.ARETURN;
+import static jdk.internal.org.objectweb.asm.Opcodes.DUP;
+import static jdk.internal.org.objectweb.asm.Opcodes.GETFIELD;
+import static jdk.internal.org.objectweb.asm.Opcodes.INVOKESPECIAL;
+import static jdk.internal.org.objectweb.asm.Opcodes.PUTFIELD;
+import static jdk.internal.org.objectweb.asm.Opcodes.RETURN;
+import static jdk.internal.org.objectweb.asm.Opcodes.V1_8;
+
+public class UnsafeDefMeths {
+
+    static final Unsafe UNSAFE;
+
+    static {
+        try {
+            Field unsafeField = Unsafe.class.getDeclaredField("theUnsafe");
+            unsafeField.setAccessible(true);
+            UNSAFE = (Unsafe) unsafeField.get(null);
+        }
+        catch (Exception e) {
+            throw new InternalError(e);
+        }
+    }
+
+    interface Resource {
+        Pointer ptr();
+    }
+
+    interface Struct extends Resource {
+        StructPointer ptr();
+    }
+
+    interface Pointer { }
+
+    interface StructPointer extends Pointer { }
+
+    interface I extends Struct {
+        void m();
+    }
+
+    static String IMPL_PREFIX = "$$impl";
+    static String PTR_FIELD_NAME = "ptr";
+
+    public static void main(String[] args) throws Throwable {
+        byte[] bytes = new UnsafeDefMeths().generate(I.class);
+        Class<?> cl = UNSAFE.defineAnonymousClass(I.class, bytes, new Object[0]);
+        I i = (I)cl.getConstructors()[0].newInstance(new Object[] { null }); // exception here!
+    }
+
+    // Generate a class similar to:
+    //
+    // public class UnsafeDefMeths$I$$impl implements UnsafeDefMeths$I, UnsafeDefMeths$Struct {
+    //
+    //     public UnsafeDefMeths$StructPointer ptr;
+    //
+    //     public UnsafeDefMeths$I$$impl(UnsafeDefMeths$StructPointer p) {
+    //         ptr = p;
+    //     }
+    //
+    //     public UnsafeDefMeths$StructPointer ptr() {
+    //         return ptr;
+    //     }
+    // }
+    //
+    byte[] generate(Class<?> iface) {
+        ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_MAXS);
+
+        String ifaceTypeName = Type.getInternalName(iface);
+        String proxyClassName = ifaceTypeName + IMPL_PREFIX;
+        // class definition
+        cw.visit(V1_8, ACC_PUBLIC + ACC_SUPER, proxyClassName,
+                desc(Object.class) + desc(ifaceTypeName) + desc(Struct.class),
+                name(Object.class),
+                new String[] { ifaceTypeName, name(Struct.class) });
+
+        cw.visitField(ACC_PUBLIC, PTR_FIELD_NAME, desc(StructPointer.class), desc(StructPointer.class), null);
+        cw.visitEnd();
+
+        // constructor
+        MethodVisitor mv = cw.visitMethod(ACC_PUBLIC, "<init>",
+                meth(desc(void.class), desc(StructPointer.class)),
+                meth(desc(void.class), desc(StructPointer.class)), null);
+        mv.visitCode();
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitInsn(DUP);
+        mv.visitMethodInsn(INVOKESPECIAL, name(Object.class), "<init>", meth(desc(void.class)), false);
+        mv.visitVarInsn(ALOAD, 1);
+        // Execution of this PUTFIELD instruction causes the bug's ClassNotFoundException.
+        mv.visitFieldInsn(PUTFIELD, proxyClassName, PTR_FIELD_NAME, desc(StructPointer.class));
+        mv.visitInsn(RETURN);
+        mv.visitMaxs(0, 0);
+        mv.visitEnd();
+
+        // ptr() impl
+        mv = cw.visitMethod(ACC_PUBLIC, PTR_FIELD_NAME, meth(desc(StructPointer.class)),
+                meth(desc(StructPointer.class)), null);
+        mv.visitCode();
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitFieldInsn(GETFIELD, proxyClassName, PTR_FIELD_NAME, desc(StructPointer.class));
+        mv.visitInsn(ARETURN);
+        mv.visitMaxs(0, 0);
+        mv.visitEnd();
+
+        return cw.toByteArray();
+    }
+
+    String name(Class<?> clazz) {
+        if (clazz.isPrimitive()) {
+            throw new IllegalStateException();
+        } else if (clazz.isArray()) {
+            return desc(clazz);
+        } else {
+            return clazz.getName().replaceAll("\\.", "/");
+        }
+    }
+
+    String desc(Class<?> clazz) {
+        String mdesc = MethodType.methodType(clazz).toMethodDescriptorString();
+        return mdesc.substring(mdesc.indexOf(')') + 1);
+    }
+
+    String desc(String clazzName) {
+        return "L" + clazzName + ";";
+    }
+
+    String gen(String clazz, String... typeargs) {
+        return clazz.substring(0, clazz.length() - 1) + Stream.of(typeargs).collect(Collectors.joining("", "<", ">")) + ";";
+    }
+
+    String meth(String restype, String... argtypes) {
+        return Stream.of(argtypes).collect(Collectors.joining("", "(", ")")) + restype;
+    }
+
+    String meth(Method m) {
+        return MethodType.methodType(m.getReturnType(), m.getParameterTypes()).toMethodDescriptorString();
+    }
+}
--- a/test/hotspot/jtreg/runtime/libadimalloc.solaris.sparc/liboverflow.c	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/runtime/libadimalloc.solaris.sparc/liboverflow.c	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,9 @@
 #include <string.h>
 #include <unistd.h>
 #include <jni.h>
+#if defined (__SUNPRO_C) && __SUNPRO_C >= 0x5140
+#pragma error_messages(off, SEC_ARR_OUTSIDE_BOUND_READ)
+#endif
 
 #ifdef __cplusplus
 extern "C" {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/README	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,59 @@
+Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
+DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+This code is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License version 2 only, as
+published by the Free Software Foundation.
+
+This code is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+version 2 for more details (a copy is included in the LICENSE file that
+accompanied this code).
+
+You should have received a copy of the GNU General Public License version
+2 along with this work; if not, write to the Free Software Foundation,
+Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+or visit www.oracle.com if you need additional information or have any
+questions.
+
+
+Briefly, the tests cover the following scenarios:
+1. prepre
+   set signal handlers -> create JVM -> send signals -> destroy JVM -> check signal handlers were called
+
+2. prepost
+   set signal handlers -> create JVM -> destroy JVM -> send signals -> check signal handlers were called
+
+3. postpre
+   create JVM ->set signal handlers -> send signals -> destroy JVM -> check signal handlers were called
+
+4. postpost
+   create JVM -> set signal handlers -> destroy JVM -> send signals -> check signal handlers were called
+
+There is one more scenario called 'nojvm'.
+In this case no JVM is created, so pure signal testing is done.
+
+The signal handlers themselves do nothing; the tests only check that each handler was called.
+Two different ways of installing signal handlers are also tested: sigaction and sigset.
+
+For 'postpre' and 'postpost', libjsig.so is used to chain signal handlers behind the VM-installed ones.
+
+=> Current tests cover the following cases (not counting the 'nojvm' scenario):
+1. Support for pre-installed signal handlers when the HotSpot VM is created.
+2. Support for signal handler installation after the HotSpot VM is created inside JNI code
+
+
+Notes:
+
+SIGQUIT, SIGTERM, SIGINT, and SIGHUP signals cannot be chained.
+If the application needs to handle these signals, the -Xrs option needs
+to be specified. So, test these signals only with the -Xrs flag.
+
+On Linux and Mac OS X, SIGUSR2 is used to implement suspend and resume. So,
+don't test SIGUSR2 on Linux and Mac OS X.
+
+SIGJVM1 and SIGJVM2 exist only on Solaris and are reserved for exclusive use
+by the JVM. So don't test SIGJVM1 and SIGJVM2.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/SigTestDriver.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import jdk.test.lib.Platform;
+import jdk.test.lib.Utils;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+import java.io.File;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public class SigTestDriver {
+    public static void main(String[] args) {
+        // No signal tests on Windows yet, so this is a no-op.
+        if (Platform.isWindows()) {
+            System.out.println("SKIPPED: no signal tests on Windows, ignore.");
+            return;
+        }
+
+        // At least one argument should be specified
+        if (args == null || args.length < 1) {
+            throw new IllegalArgumentException("At least one argument should be specified: the signal name");
+        }
+
+        String signame = args[0];
+        switch (signame) {
+            case "SIGWAITING":
+            case "SIGKILL":
+            case "SIGSTOP": {
+                System.out.println("SKIPPED: signals SIGWAITING, SIGKILL and SIGSTOP can't be tested, ignore.");
+                return;
+            }
+            case "SIGUSR2": {
+                if (Platform.isLinux()) {
+                    System.out.println("SKIPPED: SIGUSR2 can't be tested on Linux, ignore.");
+                    return;
+                } else if (Platform.isOSX()) {
+                    System.out.println("SKIPPED: SIGUSR2 can't be tested on OS X, ignore.");
+                    return;
+                }
+            }
+        }
+
+        Path test = Paths.get(System.getProperty("test.nativepath"))
+                         .resolve("sigtest")
+                         .toAbsolutePath();
+        String envVar = Platform.isWindows() ? "PATH" :
+                (Platform.isOSX() ? "DYLD_LIBRARY_PATH" : "LD_LIBRARY_PATH");
+
+        List<String> cmd = new ArrayList<>();
+        Collections.addAll(cmd,
+                test.toString(),
+                "-sig",
+                signame,
+                "-mode",
+                null, // modeIdx
+                "-scenario",
+                null // scenarioIdx
+        );
+        int modeIdx = 4;
+        int scenarioIdx = 6;
+
+        // add external flags
+        cmd.addAll(vmargs());
+
+        // add test specific arguments w/o signame
+        cmd.addAll(Arrays.asList(args)
+                         .subList(1, args.length));
+
+        boolean passed = true;
+
+        for (String mode : new String[]{"sigset", "sigaction"}) {
+            for (String scenario : new String[] {"nojvm", "prepre", "prepost", "postpre", "postpost"}) {
+                cmd.set(modeIdx, mode);
+                cmd.set(scenarioIdx, scenario);
+                System.out.printf("START TESTING: SIGNAL = %s, MODE = %s, SCENARIO=%s%n",signame, mode, scenario);
+                System.out.printf("Do execute: %s%n", cmd.toString());
+
+                ProcessBuilder pb = new ProcessBuilder(cmd);
+                pb.environment().merge(envVar, jvmLibDir().toString(),
+                        (x, y) -> y + File.pathSeparator + x);
+                pb.environment().put("CLASSPATH", Utils.TEST_CLASS_PATH);
+
+                switch (scenario) {
+                    case "postpre":
+                    case "postpost": {
+                        pb.environment().merge("LD_PRELOAD", libjsig().toString(),
+                                (x, y) -> y + File.pathSeparator + x);
+                    }
+                }
+
+                try {
+                    OutputAnalyzer oa = ProcessTools.executeProcess(pb);
+                    oa.reportDiagnosticSummary();
+                    int exitCode = oa.getExitValue();
+                    if (exitCode == 0) {
+                        System.out.println("PASSED with exit code 0");
+                    } else {
+                        System.out.println("FAILED with exit code " + exitCode);
+                        passed = false;
+                    }
+                } catch (Exception e) {
+                    throw new Error("execution failed", e);
+                }
+            }
+        }
+
+        if (!passed) {
+            throw new Error("test failed");
+        }
+    }
+
+    private static List<String> vmargs() {
+        return Stream.concat(Arrays.stream(Utils.VM_OPTIONS.split(" ")),
+                             Arrays.stream(Utils.JAVA_OPTIONS.split(" ")))
+                     .filter(s -> !s.isEmpty())
+                     .filter(s -> s.startsWith("-X"))
+                     .flatMap(arg -> Stream.of("-vmopt", arg))
+                     .collect(Collectors.toList());
+    }
+
+    private static Path libjsig() {
+        return jvmLibDir().resolve((Platform.isWindows() ? "" : "lib")
+                + "jsig." + Platform.sharedLibraryExt());
+    }
+
+    private static Path jvmLibDir() {
+        Path dir = Paths.get(Utils.TEST_JDK);
+        if (Platform.isWindows()) {
+            return dir.resolve("bin")
+                      .resolve(variant())
+                      .toAbsolutePath();
+        } else {
+            return dir.resolve("lib")
+                      .resolve(variant())
+                      .toAbsolutePath();
+        }
+    }
+
+    private static String variant() {
+        if (Platform.isServer()) {
+            return "server";
+        } else if (Platform.isClient()) {
+            return "client";
+        } else if (Platform.isMinimal()) {
+            return "minimal";
+        } else {
+            throw new Error("TESTBUG: unsupported vm variant");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigalrm.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigalrm01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGALRM
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigbus.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigbus01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGBUS
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigcld.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigcld01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGCLD
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigcont.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigcont01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGCONT
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigemt.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigemt01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGEMT
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigfpe.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigfpe01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGFPE
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigfreeze.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigfreeze01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGFREEZE
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSighup.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sighup01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGHUP   -vmopt -XX:+PrintCommandLineFlags -vmopt -Xrs
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigill.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigill01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGILL
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigint.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigint01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGINT   -vmopt -XX:+PrintCommandLineFlags -vmopt -Xrs
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigiot.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigiot01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGIOT
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSiglost.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/siglost01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGLOST
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSiglwp.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/siglwp01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGLWP
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigpipe.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigpipe01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGPIPE
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigpoll.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigpoll01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGPOLL
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigprof.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigprof01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGPROF
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigpwr.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigpwr01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGPWR
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigquit.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigquit01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGQUIT   -vmopt -XX:+PrintCommandLineFlags -vmopt -Xrs
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigsegv.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigsegv01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGSEGV
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigstop.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigstop01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGSTOP
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigsys.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigsys01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGSYS
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigterm.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigterm01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGTERM   -vmopt -XX:+PrintCommandLineFlags -vmopt -Xrs
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigthaw.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigthaw01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGTHAW
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigtrap.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigtrap01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGTRAP
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigtstp.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigtstp01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGTSTP
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigttin.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigttin01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGTTIN
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigttou.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigttou01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGTTOU
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigurg.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigurg01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGURG
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigusr1.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigusr101.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGUSR1
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigusr2.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigusr201.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGUSR2
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigvtalrm.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigvtalrm01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGVTALRM
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigwinch.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigwinch01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGWINCH
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigxcpu.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigxcpu01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGXCPU
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigxfsz.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigxfsz01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGXFSZ
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/TestSigxres.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @requires os.family != "windows"
+ *
+ * @summary converted from VM testbase runtime/signal/sigxres01.
+ * VM testbase keywords: [signal, runtime, linux, solaris, macosx]
+ *
+ * @library /test/lib
+ * @run main/native SigTestDriver SIGXRES
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/signal/exesigtest.c	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,462 @@
+/*
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <jni.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+
+/*
+ * This is the main program to test the signal chaining/handling functionality.
+ * See bugs 6277077 and 6414402.
+ */
+
+#define TRUE  1
+#define FALSE 0
+typedef int boolean;
+
+static JNIEnv *env;
+static JavaVM *vm;
+
+// static int sigid = 0;
+
+// Define the test pass/fail codes; maybe we can use
+// nsk/share/native/native_consts.h in the future.
+static int TEST_PASSED=0;
+static int TEST_FAILED=1;
+
+// This variable is used to notify whether signal has been received or not.
+static volatile sig_atomic_t sig_received = 0;
+
+static char *mode = 0;
+static char *scenario = 0;
+static char *signal_name;
+static int signal_num = -1;
+
+static JavaVMOption *options = 0;
+static int numOptions = 0;
+
+typedef struct
+{
+    int sigNum;
+    const char* sigName;
+} signalDefinition;
+
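+// Table of candidate signals; entries guarded by #ifdef are only defined on
+// some platforms and are skipped elsewhere.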
+static signalDefinition signals[] =
+{
+    {SIGINT, "SIGINT"},
+    {SIGQUIT, "SIGQUIT"},
+    {SIGILL, "SIGILL"},
+    {SIGTRAP, "SIGTRAP"},
+    {SIGIOT, "SIGIOT"},
+#ifdef SIGEMT
+    {SIGEMT, "SIGEMT"},
+#endif
+    {SIGFPE, "SIGFPE"},
+    {SIGBUS, "SIGBUS"},
+    {SIGSEGV, "SIGSEGV"},
+    {SIGSYS, "SIGSYS"},
+    {SIGPIPE, "SIGPIPE"},
+    {SIGALRM, "SIGALRM"},
+    {SIGTERM, "SIGTERM"},
+    {SIGUSR1, "SIGUSR1"},
+    {SIGUSR2, "SIGUSR2"},
+#ifdef SIGCLD
+    {SIGCLD, "SIGCLD"},
+#endif
+#ifdef SIGPWR
+    {SIGPWR, "SIGPWR"},
+#endif
+    {SIGWINCH, "SIGWINCH"},
+    {SIGURG, "SIGURG"},
+#ifdef SIGPOLL
+    {SIGPOLL, "SIGPOLL"},
+#endif
+    {SIGSTOP, "SIGSTOP"},
+    {SIGTSTP, "SIGTSTP"},
+    {SIGCONT, "SIGCONT"},
+    {SIGTTIN, "SIGTTIN"},
+    {SIGTTOU, "SIGTTOU"},
+    {SIGVTALRM, "SIGVTALRM"},
+    {SIGPROF, "SIGPROF"},
+    {SIGXCPU, "SIGXCPU"},
+    {SIGXFSZ, "SIGXFSZ"},
+#ifdef SIGWAITING
+    {SIGWAITING, "SIGWAITING"},
+#endif
+#ifdef SIGLWP
+    {SIGLWP, "SIGLWP"},
+#endif
+#ifdef SIGFREEZE
+    {SIGFREEZE, "SIGFREEZE"},
+#endif
+#ifdef SIGTHAW
+    {SIGTHAW, "SIGTHAW"},
+#endif
+#ifdef SIGLOST
+    {SIGLOST, "SIGLOST"},
+#endif
+#ifdef SIGXRES
+    {SIGXRES, "SIGXRES"},
+#endif
+    {SIGHUP, "SIGHUP"}
+};
+
+boolean isSupportedSigScenario ()
+{
+    if ( (!strcmp(scenario, "nojvm")) || (!strcmp(scenario, "prepre")) || (!strcmp(scenario, "prepost")) ||
+                (!strcmp(scenario, "postpost")) || (!strcmp(scenario, "postpre")) )
+    {
+        // printf("%s is a supported scenario\n", scenario);
+        return TRUE;
+    }
+    else
+    {
+        printf("ERROR: %s is not a supported scenario\n", scenario);
+        return FALSE;
+    }
+}
+
+boolean isSupportedSigMode ()
+{
+    if ( (!strcmp(mode, "sigset")) || (!strcmp(mode, "sigaction")) )
+    {
+        // printf("%s is a supported mode\n", mode);
+        return TRUE;
+    }
+    else
+    {
+        printf("ERROR: %s is not a supported mode\n", mode);
+        return FALSE;
+    }
+}
+
+int getSigNumBySigName(const char* sigName)
+{
+    int signals_len, sigdef_len, total_sigs, i=0;
+
+    if (sigName == NULL) return -1;
+
+    signals_len = sizeof(signals);
+    sigdef_len = sizeof(signalDefinition);
+    total_sigs = signals_len / sigdef_len;
+    for (i = 0; i < total_sigs; i++)
+    {
+        // printf("Inside for loop, i = %d\n", i);
+        if (!strcmp(sigName, signals[i].sigName))
+            return signals[i].sigNum;
+    }
+
+    return -1;
+}
+
+// signal handler
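+// Note: printf is not async-signal-safe; it is tolerated here only for test
+// diagnostics. The handler otherwise just sets a volatile sig_atomic_t flag.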
+void handler(int sig)
+{
+    printf("%s: signal handler for signal %d has been processed\n", signal_name, signal_num);
+    sig_received = 1;
+}
+
+// Initialize VM with given options
+void initVM()
+{
+    JavaVMInitArgs vm_args;
+    int i =0;
+    jint result;
+
+    vm_args.nOptions = numOptions;
+    vm_args.version = JNI_VERSION_1_2;
+    vm_args.ignoreUnrecognized = JNI_FALSE;
+    vm_args.options = options;
+
+    // Print the VM options in use
+    printf("initVM: numOptions = %d\n", vm_args.nOptions);
+    for (i = 0; i < vm_args.nOptions; i++)
+    {
+        printf("\tvm_args.options[%d].optionString = %s\n", i, vm_args.options[i].optionString);
+    }
+
+    // Initialize VM with given options
+    result = JNI_CreateJavaVM( &vm, (void **) &env, &vm_args );
+
+    // Did the VM initialize successfully ?
+    if (result != 0)
+    {
+        printf("ERROR: cannot create Java VM.\n");
+        exit(TEST_FAILED);
+    }
+
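+    // JNI_CreateJavaVM already attaches the calling thread, so this call is
+    // effectively a no-op for the main thread.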
+    (*vm)->AttachCurrentThread(vm, (void **) &env,  (void *) 0);
+    printf("initVM: JVM started and attached\n");
+}
+
+// Function to set up signal handler
+void setSignalHandler()
+{
+    int retval = 0;
+
+    if (!strcmp(mode, "sigaction"))
+    {
+        struct sigaction act;
+        act.sa_handler = handler;
+        sigemptyset(&act.sa_mask);
+        act.sa_flags = 0;
+        retval = sigaction(signal_num, &act, 0);
+        if (retval != 0) {
+           printf("ERROR: failed to set signal handler using function %s, error=%s\n", mode, strerror(errno));
+           exit(TEST_FAILED);
+        }
+    } // end - dealing with sigaction
+    else if (!strcmp(mode, "sigset"))
+    {
+        sigset(signal_num, handler);
+    } // end dealing with sigset
+    printf("%s: signal handler using function '%s' has been set\n", signal_name, mode);
+}
+
+// Function to invoke given signal
+void invokeSignal()
+{
+    int pid, retval;
+    sigset_t new_set, old_set;
+
+    pid = getpid();
+    retval = 0;
+
+    // We need to unblock the signal in case it was previously blocked by the
+    // JVM and, as a result, inherited by the child process (this is at least
+    // the case for SIGQUIT when the -Xrs flag is not used). Otherwise the
+    // test would time out.
+    sigemptyset(&new_set);
+    sigaddset(&new_set, signal_num);
+    retval = sigprocmask(SIG_UNBLOCK, &new_set, &old_set);
+    if (retval != 0) {
+        printf("ERROR: failed to unblock signal, error=%s\n", strerror(errno));
+        exit(TEST_FAILED);
+    }
+
+    // send the signal
+    retval = kill(pid, signal_num);
+    if (retval != 0)
+    {
+        printf("ERROR: failed to send signal %s, error=%s\n", signal_name, strerror(errno));
+        exit(TEST_FAILED);
+    }
+
+    // set original mask for the signal
+    retval = sigprocmask(SIG_SETMASK, &old_set, NULL);
+    if (retval != 0) {
+        printf("ERROR: failed to set original mask for signal, error=%s\n", strerror(errno));
+        exit(TEST_FAILED);
+    }
+
+    printf("%s: signal has been sent successfully\n", signal_name);
+}
+
+// Usage function
+void printUsage()
+{
+    printf("Usage: sigtest -sig {signal_name} -mode {signal | sigset | sigaction } -scenario {nojvm | postpre | postpost | prepre | prepost}> [-vmopt jvm_option] \n");
+    printf("\n");
+    exit(TEST_FAILED);
+}
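+
+// Example invocation (illustrative; in the jtreg tests these arguments are
+// passed through SigTestDriver):
+//   sigtest -sig SIGHUP -mode sigaction -scenario postpre -vmopt -Xrs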
+
+// Set signal handler BEFORE VM initialization AND
+// invoke signal BEFORE VM exits
+void scen_prepre()
+{
+    setSignalHandler();
+    initVM();
+    invokeSignal();
+    (*vm)->DestroyJavaVM(vm);
+}
+
+// Set signal handler BEFORE VM initialization AND
+// invoke signal AFTER VM exits
+void scen_prepost()
+{
+    setSignalHandler();
+    initVM();
+    (*vm)->DestroyJavaVM(vm);
+    invokeSignal();
+}
+
+// Set signal handler AFTER VM initialization AND
+// invoke signal BEFORE VM exits
+void scen_postpre()
+{
+    initVM();
+    setSignalHandler();
+    invokeSignal();
+    (*vm)->DestroyJavaVM(vm);
+}
+
+// Set signal handler AFTER VM initialization AND
+// invoke signal AFTER VM exits
+void scen_postpost()
+{
+    initVM();
+    setSignalHandler();
+    (*vm)->DestroyJavaVM(vm);
+    invokeSignal();
+}
+
+// Set signal handler with no JVM in the picture
+void scen_nojvm()
+{
+    setSignalHandler();
+    invokeSignal();
+}
+
+void run()
+{
+    // dispatch on the requested scenario
+    if (!strcmp(scenario, "postpre"))
+        scen_postpre();
+    else if (!strcmp(scenario, "postpost"))
+        scen_postpost();
+    else if (!strcmp(scenario, "prepre"))
+        scen_prepre();
+    else if (!strcmp(scenario, "prepost"))
+        scen_prepost();
+    else if (!strcmp(scenario, "nojvm"))
+        scen_nojvm();
+}
+
+// main entry point
+int main(int argc, char **argv)
+{
+    int i=0, j;
+
+    signal_num = -1;
+    signal_name = NULL;
+
+    // Parse the arguments and find out how many vm args we have
+    for (i=1; i<argc; i++)
+    {
+        if (! strcmp(argv[i], "-sig") )
+        {
+            i++;
+            if ( i >= argc )
+            {
+                printUsage();
+            }
+            signal_name = argv[i];
+
+        }
+        else if (!strcmp(argv[i], "-mode"))
+        {
+            i++;
+            if ( i >= argc )
+            {
+                printUsage();
+            }
+            mode = argv[i];
+        }
+        else if (!strcmp(argv[i], "-scenario"))
+        {
+            i++;
+            if ( i >= argc )
+            {
+                printUsage();
+            }
+            scenario = argv[i];
+        }
+        else if (!strcmp(argv[i], "-vmopt"))
+        {
+            i++;
+            if ( i >= argc )
+            {
+                printUsage();
+            }
+            numOptions++;
+        }
+        else
+        {
+            printUsage();
+        }
+    }
+
+    if ( !isSupportedSigScenario() || !isSupportedSigMode() )
+    {
+        printUsage();
+    }
+
+    // get signal number by its name
+    signal_num = getSigNumBySigName(signal_name);
+    if (signal_num == -1)
+    {
+        printf("%s: unknown signal, perhaps not supported on this platform, ignoring\n",
+            signal_name);
+        exit(TEST_PASSED);
+    }
+
+    j = 0;
+    // Initialize given number of VM options
+    if (numOptions > 0)
+    {
+        options = (JavaVMOption *) malloc(numOptions * sizeof(JavaVMOption));
+        for (i=0; i<argc; i++)
+        {
+            // parse VM options
+            if (!strcmp(argv[i], "-vmopt"))
+            {
+                i++;
+                if ( i >= argc )
+                {
+                    printUsage();
+                }
+                options[j].optionString = argv[i];
+                j++;
+            }
+        }
+    }
+
+    // do signal invocation
+    printf("%s: start testing: signal_num=%d,  mode=%s, scenario=%s\n", signal_name, signal_num, mode, scenario);
+    run();
+
+    while (!sig_received) {
+        sleep(1);
+        printf("%s: still waiting for the signal ...\n", signal_name);
+    }
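+    // The loop above exits only after the handler has run; if the signal is
+    // never delivered, the test times out rather than returning TEST_FAILED.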
+
+    printf("%s: signal has been received\n", signal_name);
+
+    free(options);
+
+    return (sig_received ? TEST_PASSED : TEST_FAILED);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/attach/ShMemLongName.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8049695
+ * @summary Ensure shmem transport works with long names
+ * @requires os.family == "windows"
+ * @library /test/lib
+ * @run main/othervm ShMemLongName
+ */
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.Map;
+
+import com.sun.jdi.Bootstrap;
+import com.sun.jdi.VirtualMachine;
+import com.sun.jdi.connect.AttachingConnector;
+import com.sun.jdi.connect.Connector;
+import jdk.test.lib.process.ProcessTools;
+
+
+public class ShMemLongName {
+
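+    // Maximum supported shmem address name length; test(2) below verifies
+    // that a name one character longer is rejected.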
+    private static final int maxShMemLength = 49;
+
+    private static final String transport = "dt_shmem";
+
+    public static void main(String[] args) throws Exception {
+        // test with the maximum supported shmem name length
+        String shmemName = ("ShMemLongName" + ProcessHandle.current().pid()
+                                    + String.join("", Collections.nCopies(maxShMemLength, "x"))
+                                 ).substring(0, maxShMemLength);
+        Process target = getTarget(shmemName).start();
+        try {
+            waitForReady(target);
+
+            log("attaching to the VM...");
+            AttachingConnector ac = Bootstrap.virtualMachineManager().attachingConnectors()
+                    .stream()
+                    .filter(c -> transport.equals(c.transport().name()))
+                    .findFirst()
+                    .orElseThrow(() -> new RuntimeException("Failed to find transport " + transport));
+            Map<String, Connector.Argument> acArgs = ac.defaultArguments();
+            acArgs.get("name").setValue(shmemName);
+
+            VirtualMachine vm = ac.attach(acArgs);
+
+            log("attached. test(1) PASSED.");
+
+            vm.dispose();
+        } finally {
+            target.destroy();
+            target.waitFor();
+        }
+
+        // Extra test: ensure that using a too-long name fails gracefully.
+        // (shmemName + "X") is expected to be "too long".
+        ProcessTools.executeProcess(getTarget(shmemName + "X"))
+                .shouldContain("address strings longer than")
+                .shouldHaveExitValue(2);
+        log("test(2) PASSED.");
+    }
+
+    private static void log(String s) {
+        System.out.println(s);
+        System.out.flush();
+    }
+
+    // creates target process builder for the specified shmem transport name
+    private static ProcessBuilder getTarget(String shmemName) throws IOException {
+        log("starting target with shmem name: '" + shmemName + "'...");
+        return ProcessTools.createJavaProcessBuilder(
+                "-Xdebug",
+                "-Xrunjdwp:transport=" + transport + ",server=y,suspend=n,address=" + shmemName,
+                "ShMemLongName$Target");
+    }
+
+    private static void waitForReady(Process target) throws Exception {
+        InputStream is = target.getInputStream();
+        try (BufferedReader reader = new BufferedReader(new InputStreamReader(is))) {
+            String line;
+            while ((line = reader.readLine()) != null) {
+                if (line.equals(Target.readyString)) {
+                    return;
+                }
+            }
+        }
+    }
+
+    public static class Target {
+        public static final String readyString = "Ready";
+        public static void main(String[] args) throws Exception {
+            log(readyString);
+            while (true) {
+                Thread.sleep(1000);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/jvmti/RetransformClassesZeroLength.java	Fri Apr 13 03:05:19 2018 +0200
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8198393
+ * @summary Instrumentation.retransformClasses(new Class[0]) should be NOP
+ * @library /test/lib
+ * @modules java.instrument
+ * @compile RetransformClassesZeroLength.java
+ * @run main RetransformClassesZeroLength
+ */
+
+import java.lang.instrument.ClassFileTransformer;
+import java.lang.instrument.IllegalClassFormatException;
+import java.lang.instrument.Instrumentation;
+import java.lang.instrument.UnmodifiableClassException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.security.ProtectionDomain;
+
+import jdk.test.lib.process.ProcessTools;
+
+
+public class RetransformClassesZeroLength {
+
+    private static String manifest =
+            "Premain-Class: " + RetransformClassesZeroLength.Agent.class.getName() + "\n"
+            + "Can-Retransform-Classes: true\n";
+
+    private static String CP = System.getProperty("test.classes");
+
+    public static void main(String[] args) throws Throwable {
+        String agentJar = buildAgent();
+        ProcessTools.executeProcess(
+                ProcessTools.createJavaProcessBuilder(
+                        "-javaagent:" + agentJar,
+                        "-version")
+        ).shouldHaveExitValue(0);
+    }
+
+    private static String buildAgent() throws Exception {
+        Path jar = Files.createTempFile(Paths.get("."), null, ".jar");
+        String jarPath = jar.toAbsolutePath().toString();
+        ClassFileInstaller.writeJar(jarPath,
+                ClassFileInstaller.Manifest.fromString(manifest),
+                RetransformClassesZeroLength.class.getName());
+        return jarPath;
+    }
+
+
+    public static class Agent implements ClassFileTransformer {
+        public static void premain(String args, Instrumentation inst) {
+            inst.addTransformer(new NoOpTransformer());
+            try {
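+                // Per 8198393 (the @bug above), retransforming a zero-length
+                // class array must be a no-op.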
+                inst.retransformClasses(new Class[0]);
+            } catch (UnmodifiableClassException ex) {
+                throw new AssertionError(ex);
+            }
+        }
+    }
+
+    private static class NoOpTransformer implements ClassFileTransformer {
+        @Override
+        public byte[] transform(ClassLoader loader,
+                                String className,
+                                Class<?> classBeingRedefined,
+                                ProtectionDomain protectionDomain,
+                                byte[] classfileBuffer
+                                ) throws IllegalClassFormatException {
+            return null;    // no transform
+        }
+    }
+}
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbSymbol.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbSymbol.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@
                     "Ljava/io/InputStream", "LambdaMetafactory", "PerfCounter",
                     "isAnonymousClass", "JVMTI_THREAD_STATE_TERMINATED", "jdi",
                     "checkGetClassLoaderPermission", "lockCreationTime",
-                    "storedAppOutput", "storedAppOutput", "getProcess",
+                    "stderrBuffer", "stdoutBuffer", "getProcess",
                     "LingeredApp"));
 
             test.run(theApp.getPid(), cmds, expStrMap, null);
--- a/test/hotspot/jtreg/serviceability/sa/TestIntConstant.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/serviceability/sa/TestIntConstant.java	Fri Apr 13 03:05:19 2018 +0200
@@ -110,7 +110,7 @@
             // with names and the values derived from enums and #define preprocessor
             // macros in hotspot.
             String[] defaultOutputStrings =
-                {"CollectedHeap::G1CollectedHeap 2",
+                {"CollectedHeap::G1 4",
                  "RUNNABLE 2",
                  "Deoptimization::Reason_class_check 4",
                  "InstanceKlass::_misc_is_anonymous 32",
--- a/test/hotspot/jtreg/testlibrary/jittester/Makefile	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/hotspot/jtreg/testlibrary/jittester/Makefile	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -69,7 +69,7 @@
 DIST_JAR = $(DIST_DIR)/JITtester.jar
 
 SRC_FILES = $(shell find $(SRC_DIR) -name '*.java')
-TESTLIBRARY_SRC_DIR = ../../../../test/lib/jdk/test/lib
+TESTLIBRARY_SRC_DIR = ../../../../lib/jdk/test/lib
 TESTLIBRARY_SRC_FILES = $(TESTLIBRARY_SRC_DIR)/Asserts.java \
                         $(TESTLIBRARY_SRC_DIR)/JDKToolFinder.java \
                         $(TESTLIBRARY_SRC_DIR)/JDKToolLauncher.java \
@@ -125,13 +125,20 @@
 	@cp ../../compiler/aot/AotCompiler.java $(TESTBASE_DIR)/compiler/aot
 
 testgroup: $(TESTBASE_DIR)
-	@echo 'jittester_all = \\' > $(TESTGROUP_FILE)
+	@echo 'jittester_all = \' > $(TESTGROUP_FILE)
 	@echo '	/' >> $(TESTGROUP_FILE)
 	@echo '' >> $(TESTGROUP_FILE)
+	@echo 'jit_tests = \' >> $(TESTGROUP_FILE)
+	@echo ' java_tests \' >> $(TESTGROUP_FILE)
+	@echo ' bytecode_tests' >> $(TESTGROUP_FILE)
+	@echo '' >> $(TESTGROUP_FILE)
+	@echo 'aot_tests = \' >> $(TESTGROUP_FILE)
+	@echo ' aot_bytecode_tests \' >> $(TESTGROUP_FILE)
+	@echo ' aot_java_tests' >> $(TESTGROUP_FILE)
+	@echo '' >> $(TESTGROUP_FILE)
 
 testroot: $(TESTBASE_DIR)
 	@echo 'groups=TEST.groups' > $(TESTROOT_FILE)
 
 $(TESTBASE_DIR) $(DIST_DIR) $(TESTBASE_DIR)/jdk/test/lib/jittester/jtreg $(TESTBASE_DIR)/compiler/aot:
 	$(shell if [ ! -d $@ ]; then mkdir -p $@; fi)
-
--- a/test/jdk/ProblemList.txt	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/jdk/ProblemList.txt	Fri Apr 13 03:05:19 2018 +0200
@@ -767,7 +767,6 @@
 
 sun/tools/jstat/jstatClassloadOutput1.sh                        8173942 generic-all
 
-sun/jvmstat/monitor/MonitoredVm/MonitorVmStartTerminate.java    8057732 generic-all
 
 ############################################################################
 
--- a/test/jtreg-ext/requires/VMProps.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/jtreg-ext/requires/VMProps.java	Fri Apr 13 03:05:19 2018 +0200
@@ -228,13 +228,9 @@
      *    User either set G1 explicitly (-XX:+UseG1GC) or did not set any GC
      * @param map - property-value pairs
      */
-    protected void vmGC(Map<String, String> map){
-        GC currentGC = GC.current();
-        boolean isByErgo = GC.currentSetByErgo();
-        List<GC> supportedGC = GC.allSupported();
+    protected void vmGC(Map<String, String> map) {
         for (GC gc: GC.values()) {
-            boolean isSupported = supportedGC.contains(gc);
-            boolean isAcceptable = isSupported && (gc == currentGC || isByErgo);
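+            // Acceptable: the VM supports this GC and it is either the one
+            // selected now, or the selection was left to ergonomics.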
+            boolean isAcceptable = gc.isSupported() && (gc.isSelected() || GC.isSelectedErgonomically());
             map.put("vm.gc." + gc.name(), "" + isAcceptable);
         }
     }
--- a/test/lib/jdk/test/lib/apps/LingeredApp.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/lib/jdk/test/lib/apps/LingeredApp.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,9 +24,12 @@
 package jdk.test.lib.apps;
 
 import java.io.BufferedReader;
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.StringReader;
 import java.nio.file.Files;
 import java.nio.file.NoSuchFileException;
 import java.nio.file.Path;
@@ -37,7 +40,11 @@
 import java.util.Date;
 import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;
 import java.util.UUID;
+import jdk.test.lib.process.OutputBuffer;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.StreamPumper;
 
 /**
  * This is a framework to launch an app that could be synchronized with caller
@@ -69,39 +76,16 @@
     private static final long spinDelay = 1000;
 
     private long lockCreationTime;
-    private final ArrayList<String> storedAppOutput;
+    private ByteArrayOutputStream stderrBuffer;
+    private ByteArrayOutputStream stdoutBuffer;
+    private Thread outPumperThread;
+    private Thread errPumperThread;
 
     protected Process appProcess;
+    protected OutputBuffer output;
     protected static final int appWaitTime = 100;
     protected final String lockFileName;
 
-    /*
-     * Drain child process output, store it into string array
-     */
-    class InputGobbler extends Thread {
-
-        InputStream is;
-        List<String> astr;
-
-        InputGobbler(InputStream is, List<String> astr) {
-            this.is = is;
-            this.astr = astr;
-        }
-
-        public void run() {
-            try {
-                InputStreamReader isr = new InputStreamReader(is);
-                BufferedReader br = new BufferedReader(isr);
-                String line = null;
-                while ((line = br.readLine()) != null) {
-                    astr.add(line);
-                }
-            } catch (IOException ex) {
-                // pass
-            }
-        }
-    }
-
     /**
      * Create LingeredApp object on caller side. Lock file must be a valid filename
      * at a writable location
@@ -110,13 +94,11 @@
      */
     public LingeredApp(String lockFileName) {
         this.lockFileName = lockFileName;
-        this.storedAppOutput = new ArrayList<String>();
     }
 
     public LingeredApp() {
         final String lockName = UUID.randomUUID().toString() + ".lck";
         this.lockFileName = lockName;
-        this.storedAppOutput = new ArrayList<String>();
     }
 
     /**
@@ -156,13 +138,48 @@
 
     /**
      *
-     * @return application output as string array. Empty array if application produced no output
+     * @return OutputBuffer object for the LingeredApp's output. Can only be called
+     * after LingeredApp has exited.
+     */
+    public OutputBuffer getOutput() {
+        if (appProcess.isAlive()) {
+            throw new RuntimeException("Process is still alive. Can't get its output.");
+        }
+        if (output == null) {
+            output = new OutputBuffer(stdoutBuffer.toString(), stderrBuffer.toString());
+        }
+        return output;
+    }
+
+    /*
+     * Capture all stdout and stderr output from the LingeredApp so it can be returned
+     * to the driver app later. This code is modeled after ProcessTools.getOutput().
+     */
+    private void startOutputPumpers() {
+        stderrBuffer = new ByteArrayOutputStream();
+        stdoutBuffer = new ByteArrayOutputStream();
+        StreamPumper outPumper = new StreamPumper(appProcess.getInputStream(), stdoutBuffer);
+        StreamPumper errPumper = new StreamPumper(appProcess.getErrorStream(), stderrBuffer);
+        outPumperThread = new Thread(outPumper);
+        errPumperThread = new Thread(errPumper);
+
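+        // Daemon threads so the pumpers cannot keep the test VM alive.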
+        outPumperThread.setDaemon(true);
+        errPumperThread.setDaemon(true);
+
+        outPumperThread.start();
+        errPumperThread.start();
+    }
+
+    /**
+     *
+     * @return application output as a List of lines. Empty List if the application produced no output
      */
     public List<String> getAppOutput() {
         if (appProcess.isAlive()) {
             throw new RuntimeException("Process is still alive. Can't get its output.");
         }
-        return storedAppOutput;
+        BufferedReader bufReader = new BufferedReader(new StringReader(getOutput().getStdout()));
+        return bufReader.lines().collect(Collectors.toList());
     }
 
     /* Make sure all part of the app use the same method to get dates,
@@ -211,13 +228,14 @@
     }
 
     public void waitAppTerminate() {
-        while (true) {
-            try {
-                appProcess.waitFor();
-                break;
-            } catch (InterruptedException ex) {
-                // pass
-            }
+        // This code is modeled after the tail end of ProcessTools.getOutput().
+        try {
+            appProcess.waitFor();
+            outPumperThread.join();
+            errPumperThread.join();
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            // Preserve the interrupt status and fall through.
         }
     }
 
@@ -280,7 +298,6 @@
         List<String> cmd = new ArrayList<String>();
         cmd.add(javapath);
 
-
         if (vmArguments == null) {
             // Propagate test.vm.options to LingeredApp, filter out possible empty options
             String testVmOpts[] = System.getProperty("test.vm.opts","").split("\\s+");
@@ -289,8 +306,7 @@
                     cmd.add(s);
                 }
             }
-        }
-        else{
+        } else {
             // Let the user manage LingeredApp options
             cmd.addAll(vmArguments);
         }
@@ -313,13 +329,7 @@
             cmdLine.append("'").append(strCmd).append("' ");
         }
 
-        System.out.println("Command line: [" + cmdLine.toString() + "]");
-    }
-
-    public void startGobblerPipe() {
-      // Create pipe reader for process, and read stdin and stderr to array of strings
-      InputGobbler gb = new InputGobbler(appProcess.getInputStream(), storedAppOutput);
-      gb.start();
+        System.err.println("Command line: [" + cmdLine.toString() + "]");
     }
 
     /**
@@ -339,13 +349,20 @@
         printCommandLine(cmd);
 
         ProcessBuilder pb = new ProcessBuilder(cmd);
-        // we don't expect any error output but make sure we are not stuck on pipe
-        // pb.redirectErrorStream(false);
         // ProcessBuilder.start can throw IOException
-        pb.redirectError(ProcessBuilder.Redirect.INHERIT);
         appProcess = pb.start();
 
-        startGobblerPipe();
+        startOutputPumpers();
+    }
+
+    private void finishApp() {
+        OutputBuffer output = getOutput();
+        String msg =
+            " LingeredApp stdout: [" + output.getStdout() + "];\n" +
+            " LingeredApp stderr: [" + output.getStderr() + "]\n" +
+            " LingeredApp exitValue = " + appProcess.exitValue();
+
+        System.err.println(msg);
     }
 
     /**
@@ -364,6 +381,7 @@
                 throw new IOException("LingeredApp terminated with non-zero exit code " + exitcode);
             }
         }
+        finishApp();
     }
 
     /**
@@ -384,6 +402,8 @@
             a.waitAppReady(appWaitTime);
         } catch (Exception ex) {
             a.deleteLock();
+            System.err.println("LingeredApp failed to start: " + ex);
+            a.finishApp();
             throw ex;
         }
 
--- a/test/lib/sun/hotspot/WhiteBox.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/lib/sun/hotspot/WhiteBox.java	Fri Apr 13 03:05:19 2018 +0200
@@ -382,9 +382,9 @@
 
   // Don't use these methods directly
   // Use sun.hotspot.gc.GC class instead.
-  public native int currentGC();
-  public native int allSupportedGC();
-  public native boolean gcSelectedByErgo();
+  public native boolean isGCSupported(int name);
+  public native boolean isGCSelected(int name);
+  public native boolean isGCSelectedErgonomically();
 
   // Force Young GC
   public native void youngGC();
--- a/test/lib/sun/hotspot/gc/GC.java	Fri Apr 13 09:06:37 2018 +0800
+++ b/test/lib/sun/hotspot/gc/GC.java	Fri Apr 13 03:05:19 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,8 +23,6 @@
 
 package sun.hotspot.gc;
 
-import java.util.ArrayList;
-import java.util.List;
 import sun.hotspot.WhiteBox;
 
 /**
@@ -32,72 +30,41 @@
  * retrieved from the VM with the WhiteBox API.
  */
 public enum GC {
+    /*
+     * Enum values must match CollectedHeap::Name
+     */
     Serial(1),
     Parallel(2),
-    ConcMarkSweep(4),
-    G1(8);
+    ConcMarkSweep(3),
+    G1(4);
 
-    private static final GC CURRENT_GC;
-    private static final int ALL_GC_CODES;
-    private static final boolean IS_BY_ERGO;
-    static {
-        WhiteBox WB = WhiteBox.getWhiteBox();
-        ALL_GC_CODES = WB.allSupportedGC();
-        IS_BY_ERGO = WB.gcSelectedByErgo();
+    private static final WhiteBox WB = WhiteBox.getWhiteBox();
 
-        int currentCode = WB.currentGC();
-        GC tmp = null;
-        for (GC gc: GC.values()) {
-            if (gc.code == currentCode) {
-                tmp = gc;
-                break;
-            }
-        }
-        if (tmp == null) {
-            throw new Error("Unknown current GC code " + currentCode);
-        }
-        CURRENT_GC = tmp;
-    }
+    private final int name;
 
-    private final int code;
-    private GC(int code) {
-        this.code = code;
+    private GC(int name) {
+        this.name = name;
     }
 
     /**
-     * @return true if the collector is supported by the VM, false otherwise.
+     * @return true if this GC is supported by the VM
      */
     public boolean isSupported() {
-        return (ALL_GC_CODES & code) != 0;
-    }
-
-
-    /**
-     * @return the current collector used by VM.
-     */
-    public static GC current() {
-        return CURRENT_GC;
+        return WB.isGCSupported(name);
     }
 
     /**
-     * @return true if GC was selected by ergonomic, false if specified
-     * explicitly by the command line flag.
+     * @return true if this GC is currently selected/used
      */
-    public static boolean currentSetByErgo() {
-        return IS_BY_ERGO;
+    public boolean isSelected() {
+        return WB.isGCSelected(name);
     }
 
     /**
-     * @return List of collectors supported by the VM.
+     * @return true if GC was selected ergonomically, as opposed
+     *         to being explicitly specified on the command line
      */
-    public static List<GC> allSupported() {
-        List<GC> list = new ArrayList<>();
-        for (GC gc: GC.values()) {
-            if (gc.isSupported()) {
-                list.add(gc);
-            }
-        }
-        return list;
+    public static boolean isSelectedErgonomically() {
+        return WB.isGCSelectedErgonomically();
     }
 }
-