Merge
author     jwilhelm
date       Fri, 30 Oct 2015 00:02:37 +0100
changeset  33625:18e7896ca9fe
parent     33373:4a0312f2894b (current diff)
parent     33624:509a72e7127b (diff)
child      33626:3c94db05e903
child      33731:31e440f07baa
Merge
hotspot/src/share/vm/oops/constantPool.cpp
hotspot/src/share/vm/oops/constantPool.hpp
--- a/hotspot/agent/src/os/linux/symtab.c	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/agent/src/os/linux/symtab.c	Fri Oct 30 00:02:37 2015 +0100
@@ -545,6 +545,7 @@
      return (uintptr_t)NULL;
 
   item.key = (char*) strdup(sym_name);
+  item.data = NULL;
   hsearch_r(item, FIND, &ret, symtab->hash_table);
   if (ret) {
     struct elf_symbol * sym = (struct elf_symbol *)(ret->data);
--- a/hotspot/make/Makefile	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/make/Makefile	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -403,6 +403,8 @@
 	$(install-dir)
 $(EXPORT_SERVER_DIR)/%.dSYM:       		$(C2_BUILD_DIR)/%.dSYM
 	$(install-dir)
+$(EXPORT_SERVER_DIR)/%.symbols:                 $(C2_BUILD_DIR)/%.symbols
+	$(install-file)
 endif
 
 # Client (C1)
--- a/hotspot/make/bsd/makefiles/buildtree.make	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/make/bsd/makefiles/buildtree.make	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -224,6 +224,11 @@
 	echo "OPENJDK = $(OPENJDK)"; \
 	echo "$(LP64_SETTING/$(DATA_MODE))"; \
 	echo; \
+	echo "STATIC_BUILD = $(STATIC_BUILD)"; \
+	echo "COMPILER_WARNINGS_FATAL = $(COMPILER_WARNINGS_FATAL)"; \
+	echo "EXTRA_LDFLAGS = $(EXTRA_LDFLAGS)"; \
+	echo "LIBRARY_SUFFIX = $(LIBRARY_SUFFIX)"; \
+	echo; \
 	echo "# Used for platform dispatching"; \
 	echo "TARGET_DEFINES  = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \
 	echo "TARGET_DEFINES += -DTARGET_ARCH_\$$(Platform_arch)"; \
--- a/hotspot/make/bsd/makefiles/defs.make	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/make/bsd/makefiles/defs.make	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -186,13 +186,16 @@
   # executed multiple times. We reduce the noise by at least checking that
   # BUILD_FLAVOR has been set.
   ifneq ($(BUILD_FLAVOR),)
-    ifeq ($(BUILD_FLAVOR), product)
-      FULL_DEBUG_SYMBOLS ?= 1
-      ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
-    else
-      # debug variants always get Full Debug Symbols (if available)
-      ENABLE_FULL_DEBUG_SYMBOLS = 1
-    endif
+    # FULL_DEBUG_SYMBOLS not created for individual static libraries
+    ifeq ($(STATIC_BUILD),false)
+      ifeq ($(BUILD_FLAVOR), product)
+        FULL_DEBUG_SYMBOLS ?= 1
+        ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
+      else
+        # debug variants always get Full Debug Symbols (if available)
+        ENABLE_FULL_DEBUG_SYMBOLS = 1
+      endif
+     endif
     $(eval $(call print_info, "ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)"))
     # since objcopy is optional, we set ZIP_DEBUGINFO_FILES later
 
@@ -256,16 +259,24 @@
 JDK_INCLUDE_SUBDIR=bsd
 
 # Library suffix
-ifeq ($(OS_VENDOR),Darwin)
-  LIBRARY_SUFFIX=dylib
+ifneq ($(STATIC_BUILD),true)
+  ifeq ($(OS_VENDOR),Darwin)
+    LIBRARY_SUFFIX=dylib
+  else
+    LIBRARY_SUFFIX=so
+  endif
 else
-  LIBRARY_SUFFIX=so
+  LIBRARY_SUFFIX=a
 endif
 
+
 EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
 
+# jsig library not needed for static builds
+ifneq ($(STATIC_BUILD),true)
 # client and server subdirectories have symbolic links to ../libjsig.so
-EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
+  EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
+endif
 
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   ifeq ($(ZIP_DEBUGINFO_FILES),1)
@@ -286,6 +297,9 @@
 ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
+  ifeq ($(STATIC_BUILD),true)
+    EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.symbols
+  endif
 
   ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
     ifeq ($(ZIP_DEBUGINFO_FILES),1)
@@ -303,6 +317,9 @@
 ifeq ($(JVM_VARIANT_CLIENT),true)
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
+  ifeq ($(STATIC_BUILD),true)
+    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.symbols
+  endif
 
   ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
     ifeq ($(ZIP_DEBUGINFO_FILES),1)
@@ -320,6 +337,9 @@
 ifeq ($(JVM_VARIANT_MINIMAL1),true)
   EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.$(LIBRARY_SUFFIX)
+  ifeq ($(STATIC_BUILD),true)
+    EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.symbols
+  endif
 endif
 
 # Serviceability Binaries
@@ -388,7 +408,9 @@
     endif
 
     # Binaries to 'universalize' if built
-    UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
+    ifneq ($(STATIC_BUILD),true)
+      UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
+    endif
     UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX)
     UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX)
     UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX)
@@ -396,6 +418,13 @@
     # Files to simply copy in place
     UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/server/Xusage.txt
     UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/client/Xusage.txt
+
+    ifeq ($(STATIC_BUILD),true)
+      UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/server/libjvm.symbols
+      UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/client/libjvm.symbols
+      UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/minimal/libjvm.symbols
+    endif
+
     ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
       ifeq ($(ZIP_DEBUGINFO_FILES),1)
           UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/server/libjvm.diz
--- a/hotspot/make/bsd/makefiles/dtrace.make	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/make/bsd/makefiles/dtrace.make	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -37,15 +37,15 @@
 # Bsd does not build libjvm_db, does not compile on macosx
 # disabled in build: rule in vm.make
 JVM_DB = libjvm_db
-LIBJVM_DB = libjvm_db.dylib
+LIBJVM_DB = libjvm_db.$(LIBRARY_SUFFIX)
 
-LIBJVM_DB_DEBUGINFO   = libjvm_db.dylib.dSYM
+LIBJVM_DB_DEBUGINFO   = libjvm_db.$(LIBRARY_SUFFIX).dSYM
 LIBJVM_DB_DIZ         = libjvm_db.diz
 
 JVM_DTRACE = jvm_dtrace
-LIBJVM_DTRACE = libjvm_dtrace.dylib
+LIBJVM_DTRACE = libjvm_dtrace.$(LIBRARY_SUFFIX)
 
-LIBJVM_DTRACE_DEBUGINFO   = libjvm_dtrace.dylib.dSYM
+LIBJVM_DTRACE_DEBUGINFO   = libjvm_dtrace.$(LIBRARY_SUFFIX).dSYM
 LIBJVM_DTRACE_DIZ         = libjvm_dtrace.diz
 
 JVMOFFS = JvmOffsets
@@ -167,14 +167,14 @@
 
 LFLAGS_GENOFFS += -L.
 
-lib$(GENOFFS).dylib: $(DTRACE_SRCDIR)/$(GENOFFS).cpp $(DTRACE_SRCDIR)/$(GENOFFS).h \
+lib$(GENOFFS).$(LIBRARY_SUFFIX): $(DTRACE_SRCDIR)/$(GENOFFS).cpp $(DTRACE_SRCDIR)/$(GENOFFS).h \
                   $(LIBJVM.o)
 	$(QUIETLY) $(CXX) $(CXXFLAGS) $(GENOFFS_CFLAGS) $(SHARED_FLAG) $(PICFLAG) \
 		 $(LFLAGS_GENOFFS) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS).cpp -ljvm
 
-$(GENOFFS): $(DTRACE_SRCDIR)/$(GENOFFS)Main.c lib$(GENOFFS).dylib
+$(GENOFFS): $(DTRACE_SRCDIR)/$(GENOFFS)Main.c lib$(GENOFFS).$(LIBRARY_SUFFIX)
 	$(QUIETLY) $(LINK.CXX) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS)Main.c \
-		./lib$(GENOFFS).dylib
+		./lib$(GENOFFS).$(LIBRARY_SUFFIX)
 
 # $@.tmp is created first to avoid an empty $(JVMOFFS).h if an error occurs.
 $(JVMOFFS).h: $(GENOFFS)
--- a/hotspot/make/bsd/makefiles/gcc.make	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/make/bsd/makefiles/gcc.make	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -186,7 +186,14 @@
   CFLAGS += $(LIBFFI_CFLAGS)
   CFLAGS += $(LLVM_CFLAGS)
 endif
+
+ifeq ($(STATIC_BUILD),true)
+CXXFLAGS += -DSTATIC_BUILD
+CFLAGS += -DSTATIC_BUILD
+else
 CFLAGS += $(VM_PICFLAG)
+endif
+
 CFLAGS += -fno-rtti
 CFLAGS += -fno-exceptions
 ifeq ($(USE_CLANG),)
--- a/hotspot/make/bsd/makefiles/jsig.make	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/make/bsd/makefiles/jsig.make	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -28,9 +28,9 @@
 JSIG   = jsig
 
 ifeq ($(OS_VENDOR), Darwin)
-  LIBJSIG   = lib$(JSIG).dylib
+  LIBJSIG   = lib$(JSIG).$(LIBRARY_SUFFIX)
 
-  LIBJSIG_DEBUGINFO   = lib$(JSIG).dylib.dSYM
+  LIBJSIG_DEBUGINFO   = lib$(JSIG).$(LIBRARY_SUFFIX).dSYM
   LIBJSIG_DIZ         = lib$(JSIG).diz
 else
   LIBJSIG   = lib$(JSIG).so
@@ -61,8 +61,14 @@
 
 $(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
 	@echo $(LOG_INFO) Making signal interposition lib...
+ifeq ($(STATIC_BUILD),true)
+	$(QUIETLY) $(CC) -c $(SYMFLAG) $(EXTRA_CFLAGS) $(ARCHFLAG) $(PICFLAG) \
+                          $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $(JSIG).o $<
+	$(QUIETLY) $(AR) $(ARFLAGS) $@ $(JSIG).o
+else
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
-                         $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) $(EXTRA_CFLAGS) -o $@ $<
+                          $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) $(EXTRA_CFLAGS) -o $@ $<
+endif
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   ifeq ($(OS_VENDOR), Darwin)
 	$(DSYMUTIL) $@
--- a/hotspot/make/bsd/makefiles/rules.make	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/make/bsd/makefiles/rules.make	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,11 @@
 COMPILE.CC       = $(CC_COMPILE) -c
 GENASM.CC        = $(CC_COMPILE) -S
 LINK.CC          = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
+ifeq ($(STATIC_BUILD),true)
+LINK_LIB.CC      = $(AR) $(ARFLAGS)
+else
 LINK_LIB.CC      = $(CC) $(LFLAGS) $(SHARED_FLAG)
+endif
 PREPROCESS.CC    = $(CC_COMPILE) -E
 
 COMPILE.CXX      = $(CXX_COMPILE) -c
--- a/hotspot/make/bsd/makefiles/saproc.make	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/make/bsd/makefiles/saproc.make	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -28,9 +28,9 @@
 SAPROC   = saproc
 
 ifeq ($(OS_VENDOR), Darwin)
-  LIBSAPROC           = lib$(SAPROC).dylib
+  LIBSAPROC           = lib$(SAPROC).$(LIBRARY_SUFFIX)
 
-  LIBSAPROC_DEBUGINFO = lib$(SAPROC).dylib.dSYM
+  LIBSAPROC_DEBUGINFO = lib$(SAPROC).$(LIBRARY_SUFFIX).dSYM
   LIBSAPROC_DIZ       = lib$(SAPROC).diz
 else
   LIBSAPROC           = lib$(SAPROC).so
--- a/hotspot/make/bsd/makefiles/vm.make	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/make/bsd/makefiles/vm.make	Fri Oct 30 00:02:37 2015 +0100
@@ -142,10 +142,10 @@
 
 JVM    = jvm
 ifeq ($(OS_VENDOR), Darwin)
-  LIBJVM   = lib$(JVM).dylib
+  LIBJVM   = lib$(JVM).$(LIBRARY_SUFFIX)
   CFLAGS  += -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE
 
-  LIBJVM_DEBUGINFO   = lib$(JVM).dylib.dSYM
+  LIBJVM_DEBUGINFO   = lib$(JVM).$(LIBRARY_SUFFIX).dSYM
   LIBJVM_DIZ         = lib$(JVM).diz
 else
   LIBJVM   = lib$(JVM).so
@@ -261,6 +261,16 @@
                  { print $$0 }				\
              }' > $@ < $(MAPFILE)
 
+ifeq ($(STATIC_BUILD),true)
+EXPORTED_SYMBOLS = libjvm.symbols
+
+libjvm.symbols : mapfile
+	$(CP) mapfile libjvm.symbols
+
+else
+EXPORTED_SYMBOLS =
+endif
+
 mapfile_reorder : mapfile $(REORDERFILE)
 	rm -f $@
 	cat $^ > $@
@@ -288,9 +298,11 @@
   LFLAGS_VM                += $(SONAMEFLAG:SONAME=$(LIBJVM))
 
   ifeq ($(OS_VENDOR), Darwin)
-    LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/.
-    LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/..
-    LFLAGS_VM += -Xlinker -install_name -Xlinker @rpath/$(@F)
+    ifneq ($(STATIC_BUILD),true)
+      LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/.
+      LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/..
+      LFLAGS_VM += -Xlinker -install_name -Xlinker @rpath/$(@F)
+    endif
   else
     LFLAGS_VM                += -Wl,-z,defs
   endif
@@ -345,6 +357,10 @@
 endif
 
 $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
+ifeq ($(STATIC_BUILD),true)
+	echo Linking static vm...;
+	$(LINK_LIB.CC) $@ $(LIBJVM.o)
+else
 	$(QUIETLY) {                                                    \
 	    echo $(LOG_INFO) Linking vm...;                                         \
 	    $(LINK_LIB.CXX/PRE_HOOK)                                     \
@@ -354,6 +370,8 @@
 	    rm -f $@.1; ln -s $@ $@.1;                                  \
 	}
 
+endif
+
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   ifeq ($(OS_VENDOR), Darwin)
 	$(DSYMUTIL) $@
@@ -410,10 +428,10 @@
 
 ifeq ($(OS_VENDOR), Darwin)
 # no libjvm_db for macosx
-build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck
+build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(EXPORTED_SYMBOLS)
 	echo "Doing vm.make build:"
 else
-build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
+build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC) $(EXPORTED_SYMBOLS)
 endif
 
 install: install_jvm install_jsig install_saproc
--- a/hotspot/make/linux/makefiles/gcc.make	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/make/linux/makefiles/gcc.make	Fri Oct 30 00:02:37 2015 +0100
@@ -223,6 +223,8 @@
     WARNING_FLAGS += -Wtype-limits
     # GCC < 4.8 don't accept this flag for C++.
     WARNING_FLAGS += -Wno-format-zero-length
+    # GCC 4.8 reports less false positives than the older compilers.
+    WARNING_FLAGS += -Wuninitialized
   endif
 endif
 
--- a/hotspot/src/cpu/aarch64/vm/methodHandles_aarch64.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/aarch64/vm/methodHandles_aarch64.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -62,7 +62,7 @@
 void MethodHandles::verify_klass(MacroAssembler* _masm,
                                  Register obj, SystemDictionary::WKID klass_id,
                                  const char* error_message) {
-  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
+  InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
   KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
   Register temp = rscratch2;
   Register temp2 = rscratch1; // used by MacroAssembler::cmpptr
--- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1276,7 +1276,7 @@
 //    return to caller
 //
 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
-                                                methodHandle method,
+                                                const methodHandle& method,
                                                 int compile_id,
                                                 BasicType* in_sig_bt,
                                                 VMRegPair* in_regs,
--- a/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -73,7 +73,7 @@
                                  Register obj_reg, SystemDictionary::WKID klass_id,
                                  Register temp_reg, Register temp2_reg,
                                  const char* error_message) {
-  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
+  InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
   KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
   Label L_ok, L_bad;
   BLOCK_COMMENT("verify_klass {");
--- a/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1701,7 +1701,7 @@
 //   return to caller
 //
 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
-                                                methodHandle method,
+                                                const methodHandle& method,
                                                 int compile_id,
                                                 BasicType *in_sig_bt,
                                                 VMRegPair *in_regs,
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -69,7 +69,7 @@
                                  Register obj_reg, SystemDictionary::WKID klass_id,
                                  Register temp_reg, Register temp2_reg,
                                  const char* error_message) {
-  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
+  InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
   KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
   bool did_save = false;
   if (temp_reg == noreg || temp2_reg == noreg) {
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1955,7 +1955,7 @@
 //    return to caller
 //
 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
-                                                methodHandle method,
+                                                const methodHandle& method,
                                                 int compile_id,
                                                 BasicType* in_sig_bt,
                                                 VMRegPair* in_regs,
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1667,8 +1667,8 @@
   Register Rtmp1 = noreg;
 
   // check if it needs to be profiled
-  ciMethodData* md;
-  ciProfileData* data;
+  ciMethodData* md = NULL;
+  ciProfileData* data = NULL;
 
   if (op->should_profile()) {
     ciMethod* method = op->profiled_method();
@@ -1827,8 +1827,8 @@
     CodeStub* stub = op->stub();
 
     // check if it needs to be profiled
-    ciMethodData* md;
-    ciProfileData* data;
+    ciMethodData* md = NULL;
+    ciProfileData* data = NULL;
 
     if (op->should_profile()) {
       ciMethod* method = op->profiled_method();
@@ -2005,7 +2005,8 @@
     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
-    default:                    ShouldNotReachHere();
+    default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
+                                ShouldNotReachHere();
   }
 
   if (opr1->is_cpu_register()) {
@@ -3182,27 +3183,23 @@
   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
 
   int elem_size = type2aelembytes(basic_type);
-  int shift_amount;
   Address::ScaleFactor scale;
 
   switch (elem_size) {
     case 1 :
-      shift_amount = 0;
       scale = Address::times_1;
       break;
     case 2 :
-      shift_amount = 1;
       scale = Address::times_2;
       break;
     case 4 :
-      shift_amount = 2;
       scale = Address::times_4;
       break;
     case 8 :
-      shift_amount = 3;
       scale = Address::times_8;
       break;
     default:
+      scale = Address::no_scale;
       ShouldNotReachHere();
   }
 
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -195,7 +195,7 @@
 
 
 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
-  LIR_Opr r;
+  LIR_Opr r = NULL;
   if (type == T_LONG) {
     r = LIR_OprFact::longConst(x);
   } else if (type == T_INT) {
@@ -484,7 +484,7 @@
     __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
     __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
 
-    address entry;
+    address entry = NULL;
     switch (x->op()) {
     case Bytecodes::_lrem:
       entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
@@ -1054,7 +1054,7 @@
 
 void LIRGenerator::do_Convert(Convert* x) {
   // flags that vary for the different operations and different SSE-settings
-  bool fixed_input, fixed_result, round_result, needs_stub;
+  bool fixed_input = false, fixed_result = false, round_result = false, needs_stub = false;
 
   switch (x->op()) {
     case Bytecodes::_i2l: // fall through
--- a/hotspot/src/cpu/x86/vm/jniFastGetField_x86_32.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/jniFastGetField_x86_32.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -48,7 +48,7 @@
 // between loads, which is much more efficient than lfence.
 
 address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
-  const char *name;
+  const char *name = NULL;
   switch (type) {
     case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
     case T_BYTE:    name = "jni_fast_GetByteField";    break;
@@ -122,7 +122,7 @@
 
   slowcase_entry_pclist[count++] = __ pc();
   __ bind (slow);
-  address slow_case_addr;
+  address slow_case_addr = NULL;
   switch (type) {
     case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
     case T_BYTE:    slow_case_addr = jni_GetByteField_addr();    break;
@@ -256,7 +256,7 @@
 }
 
 address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
-  const char *name;
+  const char *name = NULL;
   switch (type) {
     case T_FLOAT:  name = "jni_fast_GetFloatField";  break;
     case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
@@ -337,7 +337,7 @@
 
   slowcase_entry_pclist[count++] = __ pc();
   __ bind (slow);
-  address slow_case_addr;
+  address slow_case_addr = NULL;
   switch (type) {
     case T_FLOAT:  slow_case_addr = jni_GetFloatField_addr();  break;
     case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
--- a/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -51,7 +51,7 @@
 // since that may scratch r10!
 
 address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
-  const char *name;
+  const char *name = NULL;
   switch (type) {
     case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
     case T_BYTE:    name = "jni_fast_GetByteField";    break;
@@ -111,7 +111,7 @@
 
   slowcase_entry_pclist[count++] = __ pc();
   __ bind (slow);
-  address slow_case_addr;
+  address slow_case_addr = NULL;
   switch (type) {
     case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
     case T_BYTE:    slow_case_addr = jni_GetByteField_addr();    break;
@@ -153,7 +153,7 @@
 }
 
 address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
-  const char *name;
+  const char *name = NULL;
   switch (type) {
     case T_FLOAT:     name = "jni_fast_GetFloatField";     break;
     case T_DOUBLE:    name = "jni_fast_GetDoubleField";    break;
@@ -206,7 +206,7 @@
 
   slowcase_entry_pclist[count++] = __ pc();
   __ bind (slow);
-  address slow_case_addr;
+  address slow_case_addr = NULL;
   switch (type) {
     case T_FLOAT:     slow_case_addr = jni_GetFloatField_addr();  break;
     case T_DOUBLE:    slow_case_addr = jni_GetDoubleField_addr();
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -63,7 +63,7 @@
 void MethodHandles::verify_klass(MacroAssembler* _masm,
                                  Register obj, SystemDictionary::WKID klass_id,
                                  const char* error_message) {
-  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
+  InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
   KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
   Register temp = rdi;
   Register temp2 = noreg;
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1502,7 +1502,7 @@
 //    return to caller
 //
 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
-                                                methodHandle method,
+                                                const methodHandle& method,
                                                 int compile_id,
                                                 BasicType* in_sig_bt,
                                                 VMRegPair* in_regs,
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1694,7 +1694,7 @@
 };
 
 static void verify_oop_args(MacroAssembler* masm,
-                            methodHandle method,
+                            const methodHandle& method,
                             const BasicType* sig_bt,
                             const VMRegPair* regs) {
   Register temp_reg = rbx;  // not part of any compiled calling seq
@@ -1804,7 +1804,7 @@
 //    return to caller
 //
 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
-                                                methodHandle method,
+                                                const methodHandle& method,
                                                 int compile_id,
                                                 BasicType* in_sig_bt,
                                                 VMRegPair* in_regs,
--- a/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -76,7 +76,7 @@
 }
 
 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
-                                                methodHandle method,
+                                                const methodHandle& method,
                                                 int compile_id,
                                                 BasicType *sig_bt,
                                                 VMRegPair *regs,
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -3811,7 +3811,7 @@
 // able to use structured exception handling (thread-local exception filters)
 // on, e.g., Win32.
 void
-os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
+os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
                          JavaCallArguments* args, Thread* thread) {
   f(value, method, args, thread);
 }
--- a/hotspot/src/os/bsd/dtrace/libjvm_db.c	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/os/bsd/dtrace/libjvm_db.c	Fri Oct 30 00:02:37 2015 +0100
@@ -834,7 +834,7 @@
       if (stream_bci == vf->bci) {
           /* perfect match */
           if (debug > 2)
-              fprintf(stderr, "\t line_number_from_bci: END: exact line: %ld \n\n", vf->line);
+              fprintf(stderr, "\t line_number_from_bci: END: exact line: %d \n\n", vf->line);
           vf->line = stream_line;
           return PS_OK;
       } else {
@@ -843,14 +843,14 @@
               best_bci = stream_bci;
               vf->line = stream_line;
               if (debug > 2) {
-                  fprintf(stderr, "\t line_number_from_bci: best_bci: %ld, best_line: %ld\n",
+                  fprintf(stderr, "\t line_number_from_bci: best_bci: %d, best_line: %d\n",
                                    best_bci, vf->line);
               }
           }
       }
   }
   if (debug > 2)
-      fprintf(stderr, "\t line_number_from_bci: END: line: %ld \n\n", vf->line);
+      fprintf(stderr, "\t line_number_from_bci: END: line: %d \n\n", vf->line);
   return PS_OK;
 
  fail:
@@ -1002,7 +1002,7 @@
       err = line_number_from_bci(N->J, vf);
       CHECK_FAIL(err);
       if (debug > 2) {
-        fprintf(stderr, "\t scopeDesc_chain: method: %#8llx, line: %ld\n",
+        fprintf(stderr, "\t scopeDesc_chain: method: %#8llx, line: %d\n",
                 vf->method, vf->line);
       }
     }
@@ -1338,7 +1338,7 @@
   jframe->bci = vf->bci;
   jframe->line = vf->line;
   if (debug) {
-      fprintf(stderr, "\t Jget_vframe: method name: %s, line: %ld\n",
+      fprintf(stderr, "\t Jget_vframe: method name: %s, line: %d\n",
                        name, vf->line);
   }
   return PS_OK;
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -440,6 +440,10 @@
     if (pslash != NULL) {
       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
     }
+#ifdef STATIC_BUILD
+    strcat(buf, "/lib");
+#endif
+
     Arguments::set_dll_dir(buf);
 
     if (pslash != NULL) {
@@ -1388,6 +1392,9 @@
 
 #ifdef __APPLE__
 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
+#ifdef STATIC_BUILD
+  return os::get_default_process_handle();
+#else
   void * result= ::dlopen(filename, RTLD_LAZY);
   if (result != NULL) {
     // Successful loading
@@ -1399,9 +1406,13 @@
   ebuf[ebuflen-1]='\0';
 
   return NULL;
+#endif // STATIC_BUILD
 }
 #else
 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
+#ifdef STATIC_BUILD
+  return os::get_default_process_handle();
+#else
   void * result= ::dlopen(filename, RTLD_LAZY);
   if (result != NULL) {
     // Successful loading
@@ -1574,6 +1585,7 @@
   }
 
   return NULL;
+#endif // STATIC_BUILD
 }
 #endif // !__APPLE__
 
@@ -3745,7 +3757,7 @@
 // able to use structured exception handling (thread-local exception filters)
 // on, e.g., Win32.
 void os::os_exception_wrapper(java_call_t f, JavaValue* value,
-                              methodHandle* method, JavaCallArguments* args,
+                              const methodHandle& method, JavaCallArguments* args,
                               Thread* thread) {
   f(value, method, args, thread);
 }
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -2794,7 +2794,7 @@
 
 
 int os::Linux::sched_getcpu_syscall(void) {
-  unsigned int cpu;
+  unsigned int cpu = 0;
   int retval = -1;
 
 #if defined(IA32)
@@ -4187,8 +4187,8 @@
       sigaddset(&(actp->sa_mask), sig);
     }
 
-    sa_handler_t hand;
-    sa_sigaction_t sa;
+    sa_handler_t hand = NULL;
+    sa_sigaction_t sa = NULL;
     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
     // retrieve the chained handler
     if (siginfo_flag_set) {
@@ -4393,7 +4393,7 @@
 
 static const char* get_signal_handler_name(address handler,
                                            char* buf, int buflen) {
-  int offset;
+  int offset = 0;
   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
   if (found) {
     // skip directory names
@@ -4960,7 +4960,7 @@
 // able to use structured exception handling (thread-local exception filters)
 // on, e.g., Win32.
 void
-os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
+os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
                          JavaCallArguments* args, Thread* thread) {
   f(value, method, args, thread);
 }
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c	Fri Oct 30 00:02:37 2015 +0100
@@ -834,7 +834,7 @@
       if (stream_bci == vf->bci) {
           /* perfect match */
           if (debug > 2)
-              fprintf(stderr, "\t line_number_from_bci: END: exact line: %ld \n\n", vf->line);
+              fprintf(stderr, "\t line_number_from_bci: END: exact line: %d \n\n", vf->line);
           vf->line = stream_line;
           return PS_OK;
       } else {
@@ -843,14 +843,14 @@
               best_bci = stream_bci;
               vf->line = stream_line;
               if (debug > 2) {
-                  fprintf(stderr, "\t line_number_from_bci: best_bci: %ld, best_line: %ld\n",
+                  fprintf(stderr, "\t line_number_from_bci: best_bci: %d, best_line: %d\n",
                                    best_bci, vf->line);
               }
           }
       }
   }
   if (debug > 2)
-      fprintf(stderr, "\t line_number_from_bci: END: line: %ld \n\n", vf->line);
+      fprintf(stderr, "\t line_number_from_bci: END: line: %d \n\n", vf->line);
   return PS_OK;
 
  fail:
@@ -1002,7 +1002,7 @@
       err = line_number_from_bci(N->J, vf);
       CHECK_FAIL(err);
       if (debug > 2) {
-        fprintf(stderr, "\t scopeDesc_chain: method: %#8llx, line: %ld\n",
+        fprintf(stderr, "\t scopeDesc_chain: method: %#8llx, line: %d\n",
                 vf->method, vf->line);
       }
     }
@@ -1338,7 +1338,7 @@
   jframe->bci = vf->bci;
   jframe->line = vf->line;
   if (debug) {
-      fprintf(stderr, "\t Jget_vframe: method name: %s, line: %ld\n",
+      fprintf(stderr, "\t Jget_vframe: method name: %s, line: %d\n",
                        name, vf->line);
   }
   return PS_OK;
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -3774,7 +3774,7 @@
 // This does not do anything on Solaris. This is basically a hook for being
 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
 void os::os_exception_wrapper(java_call_t f, JavaValue* value,
-                              methodHandle* method, JavaCallArguments* args,
+                              const methodHandle& method, JavaCallArguments* args,
                               Thread* thread) {
   f(value, method, args, thread);
 }
--- a/hotspot/src/os_cpu/linux_x86/vm/copy_linux_x86.inline.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/os_cpu/linux_x86/vm/copy_linux_x86.inline.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -30,7 +30,7 @@
   (void)memmove(to, from, count * HeapWordSize);
 #else
   // Includes a zero-count check.
-  intx temp;
+  intx temp = 0;
   __asm__ volatile("        testl   %6,%6         ;"
                    "        jz      7f            ;"
                    "        cmpl    %4,%5         ;"
@@ -88,7 +88,7 @@
   }
 #else
   // Includes a zero-count check.
-  intx temp;
+  intx temp = 0;
   __asm__ volatile("        testl   %6,%6       ;"
                    "        jz      3f          ;"
                    "        cmpl    $32,%6      ;"
@@ -145,7 +145,7 @@
   (void)memmove(to, from, count);
 #else
   // Includes a zero-count check.
-  intx temp;
+  intx temp = 0;
   __asm__ volatile("        testl   %6,%6          ;"
                    "        jz      13f            ;"
                    "        cmpl    %4,%5          ;"
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,7 +71,7 @@
 extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS* );
 
 // Install a win32 structured exception handler around thread.
-void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
+void os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, JavaCallArguments* args, Thread* thread) {
   __try {
 
 #ifndef AMD64
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -509,7 +509,7 @@
   }
 }
 
-void CodeBuffer::finalize_oop_references(methodHandle mh) {
+void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
   No_Safepoint_Verifier nsv;
 
   GrowableArray<oop> oops;
--- a/hotspot/src/share/vm/asm/codeBuffer.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/asm/codeBuffer.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -541,7 +541,7 @@
   bool insts_contains2(address pc) const { return _insts.contains2(pc); }
 
   // Record any extra oops required to keep embedded metadata alive
-  void finalize_oop_references(methodHandle method);
+  void finalize_oop_references(const methodHandle& method);
 
   // Allocated size in all sections, when aligned and concatenated
   // (this is the eventual state of the content in its final
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -639,7 +639,7 @@
 
   if (l == r && !lt->is_float_kind()) {
     // pattern: If (a cond a) => simplify to Goto
-    BlockBegin* sux;
+    BlockBegin* sux = NULL;
     switch (x->cond()) {
     case If::eql: sux = x->sux_for(true);  break;
     case If::neq: sux = x->sux_for(false); break;
@@ -647,6 +647,7 @@
     case If::leq: sux = x->sux_for(true);  break;
     case If::gtr: sux = x->sux_for(false); break;
     case If::geq: sux = x->sux_for(true);  break;
+    default: ShouldNotReachHere();
     }
     // If is a safepoint then the debug information should come from the state_before of the If.
     set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
@@ -684,7 +685,7 @@
       } else {
         // two successors differ and two successors are the same => simplify to: If (x cmp y)
         // determine new condition & successors
-        If::Condition cond;
+        If::Condition cond = If::eql;
         BlockBegin* tsux = NULL;
         BlockBegin* fsux = NULL;
              if (lss_sux == eql_sux) { cond = If::leq; tsux = lss_sux; fsux = gtr_sux; }
--- a/hotspot/src/share/vm/c1/c1_Compiler.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compiler.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,7 +98,7 @@
   return buffer_blob;
 }
 
-bool Compiler::is_intrinsic_supported(methodHandle method) {
+bool Compiler::is_intrinsic_supported(const methodHandle& method) {
   vmIntrinsics::ID id = method->intrinsic_id();
   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
 
--- a/hotspot/src/share/vm/c1/c1_Compiler.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compiler.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@
   virtual void print_timers();
 
   // Check if the C1 compiler supports an intrinsic for 'method'.
-  virtual bool is_intrinsic_supported(methodHandle method);
+  virtual bool is_intrinsic_supported(const methodHandle& method);
 
   // Size of the code buffer
   static int code_buffer_size();
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -3823,8 +3823,8 @@
   caller_state->truncate_stack(args_base);
   assert(callee_state->stack_size() == 0, "callee stack must be empty");
 
-  Value lock;
-  BlockBegin* sync_handler;
+  Value lock = NULL;
+  BlockBegin* sync_handler = NULL;
 
   // Inline the locking of the receiver if the callee is synchronized
   if (callee->is_synchronized()) {
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -3348,7 +3348,7 @@
 }
 
 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
-  int freq_log;
+  int freq_log = 0;
   int level = compilation()->env()->comp_level();
   if (level == CompLevel_limited_profile) {
     freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
@@ -3394,7 +3394,7 @@
   assert(level > CompLevel_simple, "Shouldn't be here");
 
   int offset = -1;
-  LIR_Opr counter_holder;
+  LIR_Opr counter_holder = NULL;
   if (level == CompLevel_limited_profile) {
     MethodCounters* counters_adr = method->ensure_method_counters();
     if (counters_adr == NULL) {
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -409,7 +409,7 @@
   }
 
   static LIR_Condition lir_cond(If::Condition cond) {
-    LIR_Condition l;
+    LIR_Condition l = lir_cond_unknown;
     switch (cond) {
     case If::eql: l = lir_cond_equal;        break;
     case If::neq: l = lir_cond_notEqual;     break;
@@ -419,6 +419,7 @@
     case If::gtr: l = lir_cond_greater;      break;
     case If::aeq: l = lir_cond_aboveEqual;   break;
     case If::beq: l = lir_cond_belowEqual;   break;
+    default: fatal("You must pass valid If::Condition");
     };
     return l;
   }
--- a/hotspot/src/share/vm/ci/ciArrayKlass.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciArrayKlass.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
 //
 // Loaded array klass.
 ciArrayKlass::ciArrayKlass(KlassHandle h_k) : ciKlass(h_k) {
-  assert(get_Klass()->oop_is_array(), "wrong type");
+  assert(get_Klass()->is_array_klass(), "wrong type");
   _dimension = get_ArrayKlass()->dimension();
 }
 
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -365,11 +365,11 @@
     return true;
   }
 
-  if (resolved_klass->oop_is_objArray()) {
+  if (resolved_klass->is_objArray_klass()) {
     // Find the element klass, if this is an array.
     resolved_klass = ObjArrayKlass::cast(resolved_klass)->bottom_klass();
   }
-  if (resolved_klass->oop_is_instance()) {
+  if (resolved_klass->is_instance_klass()) {
     return Reflection::verify_class_access(accessing_klass->get_Klass(),
                                            resolved_klass,
                                            true);
@@ -380,7 +380,7 @@
 // ------------------------------------------------------------------
 // ciEnv::get_klass_by_name_impl
 ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
-                                       constantPoolHandle cpool,
+                                       const constantPoolHandle& cpool,
                                        ciSymbol* name,
                                        bool require_local) {
   ASSERT_IN_VM;
@@ -502,7 +502,7 @@
 // ciEnv::get_klass_by_index_impl
 //
 // Implementation of get_klass_by_index.
-ciKlass* ciEnv::get_klass_by_index_impl(constantPoolHandle cpool,
+ciKlass* ciEnv::get_klass_by_index_impl(const constantPoolHandle& cpool,
                                         int index,
                                         bool& is_accessible,
                                         ciInstanceKlass* accessor) {
@@ -559,7 +559,7 @@
 // ciEnv::get_klass_by_index
 //
 // Get a klass from the constant pool.
-ciKlass* ciEnv::get_klass_by_index(constantPoolHandle cpool,
+ciKlass* ciEnv::get_klass_by_index(const constantPoolHandle& cpool,
                                    int index,
                                    bool& is_accessible,
                                    ciInstanceKlass* accessor) {
@@ -570,7 +570,7 @@
 // ciEnv::get_constant_by_index_impl
 //
 // Implementation of get_constant_by_index().
-ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool,
+ciConstant ciEnv::get_constant_by_index_impl(const constantPoolHandle& cpool,
                                              int pool_index, int cache_index,
                                              ciInstanceKlass* accessor) {
   bool ignore_will_link;
@@ -656,7 +656,7 @@
 // Pull a constant out of the constant pool.  How appropriate.
 //
 // Implementation note: this query is currently in no way cached.
-ciConstant ciEnv::get_constant_by_index(constantPoolHandle cpool,
+ciConstant ciEnv::get_constant_by_index(const constantPoolHandle& cpool,
                                         int pool_index, int cache_index,
                                         ciInstanceKlass* accessor) {
   GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, pool_index, cache_index, accessor);)
@@ -736,7 +736,7 @@
 
 // ------------------------------------------------------------------
 // ciEnv::get_method_by_index_impl
-ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool,
+ciMethod* ciEnv::get_method_by_index_impl(const constantPoolHandle& cpool,
                                           int index, Bytecodes::Code bc,
                                           ciInstanceKlass* accessor) {
   if (bc == Bytecodes::_invokedynamic) {
@@ -848,7 +848,7 @@
 
 // ------------------------------------------------------------------
 // ciEnv::get_method_by_index
-ciMethod* ciEnv::get_method_by_index(constantPoolHandle cpool,
+ciMethod* ciEnv::get_method_by_index(const constantPoolHandle& cpool,
                                      int index, Bytecodes::Code bc,
                                      ciInstanceKlass* accessor) {
   GUARDED_VM_ENTRY(return get_method_by_index_impl(cpool, index, bc, accessor);)
--- a/hotspot/src/share/vm/ci/ciEnv.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,34 +118,34 @@
                              bool require_local);
 
   // Constant pool access.
-  ciKlass*   get_klass_by_index(constantPoolHandle cpool,
+  ciKlass*   get_klass_by_index(const constantPoolHandle& cpool,
                                 int klass_index,
                                 bool& is_accessible,
                                 ciInstanceKlass* loading_klass);
-  ciConstant get_constant_by_index(constantPoolHandle cpool,
+  ciConstant get_constant_by_index(const constantPoolHandle& cpool,
                                    int pool_index, int cache_index,
                                    ciInstanceKlass* accessor);
   ciField*   get_field_by_index(ciInstanceKlass* loading_klass,
                                 int field_index);
-  ciMethod*  get_method_by_index(constantPoolHandle cpool,
+  ciMethod*  get_method_by_index(const constantPoolHandle& cpool,
                                  int method_index, Bytecodes::Code bc,
                                  ciInstanceKlass* loading_klass);
 
   // Implementation methods for loading and constant pool access.
   ciKlass* get_klass_by_name_impl(ciKlass* accessing_klass,
-                                  constantPoolHandle cpool,
+                                  const constantPoolHandle& cpool,
                                   ciSymbol* klass_name,
                                   bool require_local);
-  ciKlass*   get_klass_by_index_impl(constantPoolHandle cpool,
+  ciKlass*   get_klass_by_index_impl(const constantPoolHandle& cpool,
                                      int klass_index,
                                      bool& is_accessible,
                                      ciInstanceKlass* loading_klass);
-  ciConstant get_constant_by_index_impl(constantPoolHandle cpool,
+  ciConstant get_constant_by_index_impl(const constantPoolHandle& cpool,
                                         int pool_index, int cache_index,
                                         ciInstanceKlass* loading_klass);
   ciField*   get_field_by_index_impl(ciInstanceKlass* loading_klass,
                                      int field_index);
-  ciMethod*  get_method_by_index_impl(constantPoolHandle cpool,
+  ciMethod*  get_method_by_index_impl(const constantPoolHandle& cpool,
                                       int method_index, Bytecodes::Code bc,
                                       ciInstanceKlass* loading_klass);
 
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@
 ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
   ciKlass(h_k)
 {
-  assert(get_Klass()->oop_is_instance(), "wrong type");
+  assert(get_Klass()->is_instance_klass(), "wrong type");
   assert(get_instanceKlass()->is_loaded(), "must be at least loaded");
   InstanceKlass* ik = get_instanceKlass();
 
@@ -356,7 +356,7 @@
   VM_ENTRY_MARK;
   InstanceKlass* ik = get_instanceKlass();
   Klass* up = ik->up_cast_abstract();
-  assert(up->oop_is_instance(), "must be InstanceKlass");
+  assert(up->is_instance_klass(), "must be InstanceKlass");
   if (ik == up) {
     return NULL;
   }
@@ -683,7 +683,7 @@
   // Try to record related loaded classes
   Klass* sub = ik->subklass();
   while (sub != NULL) {
-    if (sub->oop_is_instance()) {
+    if (sub->is_instance_klass()) {
       out->print_cr("instanceKlass %s", sub->name()->as_quoted_ascii());
     }
     sub = sub->next_sibling();
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,7 +81,7 @@
   ciInstanceKlass(ciSymbol* name, jobject loader, jobject protection_domain);
 
   InstanceKlass* get_instanceKlass() const {
-    return (InstanceKlass*)get_Klass();
+    return InstanceKlass::cast(get_Klass());
   }
 
   oop loader();
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -745,7 +745,7 @@
 #ifndef PRODUCT
   if (TraceDependencies && target() != NULL && target() != root_m->get_Method()) {
     tty->print("found a non-root unique target method");
-    tty->print_cr("  context = %s", InstanceKlass::cast(actual_recv->get_Klass())->external_name());
+    tty->print_cr("  context = %s", actual_recv->get_Klass()->external_name());
     tty->print("  method  = ");
     target->print_short_name(tty);
     tty->cr();
@@ -791,7 +791,7 @@
    methodHandle m;
    // Only do exact lookup if receiver klass has been linked.  Otherwise,
    // the vtable has not been setup, and the LinkResolver will fail.
-   if (h_recv->oop_is_array()
+   if (h_recv->is_array_klass()
         ||
        InstanceKlass::cast(h_recv())->is_linked() && !exact_receiver->is_interface()) {
      if (holder()->is_interface()) {
--- a/hotspot/src/share/vm/ci/ciObjArrayKlass.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciObjArrayKlass.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@
 //
 // Constructor for loaded object array klasses.
 ciObjArrayKlass::ciObjArrayKlass(KlassHandle h_k) : ciArrayKlass(h_k) {
-  assert(get_Klass()->oop_is_objArray(), "wrong type");
+  assert(get_Klass()->is_objArray_klass(), "wrong type");
   Klass* element_Klass = get_ObjArrayKlass()->bottom_klass();
   _base_element_klass = CURRENT_ENV->get_klass(element_Klass);
   assert(_base_element_klass->is_instance_klass() ||
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -357,7 +357,7 @@
   // Hold metadata from unloading by keeping it's holder alive.
   if (_initialized && o->is_klass()) {
     Klass* holder = ((Klass*)o);
-    if (holder->oop_is_instance() && InstanceKlass::cast(holder)->is_anonymous()) {
+    if (holder->is_instance_klass() && InstanceKlass::cast(holder)->is_anonymous()) {
       // Though ciInstanceKlass records class loader oop, it's not enough to keep
       // VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
       // It is enough to record a ciObject, since cached elements are never removed
@@ -370,11 +370,11 @@
   if (o->is_klass()) {
     KlassHandle h_k(THREAD, (Klass*)o);
     Klass* k = (Klass*)o;
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       return new (arena()) ciInstanceKlass(h_k);
-    } else if (k->oop_is_objArray()) {
+    } else if (k->is_objArray_klass()) {
       return new (arena()) ciObjArrayKlass(h_k);
-    } else if (k->oop_is_typeArray()) {
+    } else if (k->is_typeArray_klass()) {
       return new (arena()) ciTypeArrayKlass(h_k);
     }
   } else if (o->is_method()) {
@@ -414,6 +414,7 @@
     metadata_owner_klass = m->as_method()->get_Method()->constants()->pool_holder();
   } else {
     fatal("Not implemented for other types of metadata");
+    return;
   }
 
   oop metadata_holder = metadata_owner_klass->klass_holder();
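The ciObjectFactory hunk above is part of the repo-wide rename of the Klass type queries (oop_is_instance(), oop_is_objArray(), oop_is_typeArray() become is_instance_klass(), is_objArray_klass(), is_typeArray_klass()); the dispatch logic itself is unchanged. Below is a minimal standalone sketch of the same check-then-cast pattern, using hypothetical Klass/InstanceKlass stand-ins rather than the real HotSpot classes.

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-ins for the HotSpot Klass hierarchy, for illustration only.
    struct Klass {
      virtual ~Klass() {}
      virtual bool is_instance_klass()  const { return false; }
      virtual bool is_objArray_klass()  const { return false; }
      virtual bool is_typeArray_klass() const { return false; }
    };

    struct InstanceKlass : Klass {
      bool is_instance_klass() const { return true; }
      // Checked downcast in the style of InstanceKlass::cast(k).
      static InstanceKlass* cast(Klass* k) {
        assert(k->is_instance_klass() && "wrong klass kind");
        return static_cast<InstanceKlass*>(k);
      }
    };

    struct ObjArrayKlass  : Klass { bool is_objArray_klass()  const { return true; } };
    struct TypeArrayKlass : Klass { bool is_typeArray_klass() const { return true; } };

    // Dispatch on the klass kind, as the factory does when wrapping metadata.
    const char* describe(Klass* k) {
      if (k->is_instance_klass())  return "instance klass";
      if (k->is_objArray_klass())  return "object array klass";
      if (k->is_typeArray_klass()) return "type array klass";
      return "unknown";
    }

    int main() {
      InstanceKlass ik;
      ObjArrayKlass oak;
      printf("%s\n", describe(&ik));   // instance klass
      printf("%s\n", describe(&oak));  // object array klass
      InstanceKlass::cast(&ik);        // succeeds; cast(&oak) would assert
      return 0;
    }
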
--- a/hotspot/src/share/vm/ci/ciReplay.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciReplay.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -530,15 +530,15 @@
     if (_imethod != NULL) {
       return; // Replay Inlining
     }
-    Klass* k = method->method_holder();
-    ((InstanceKlass*)k)->initialize(THREAD);
+    InstanceKlass* ik = method->method_holder();
+    ik->initialize(THREAD);
     if (HAS_PENDING_EXCEPTION) {
       oop throwable = PENDING_EXCEPTION;
       java_lang_Throwable::print(throwable, tty);
       tty->cr();
       if (ReplayIgnoreInitErrors) {
         CLEAR_PENDING_EXCEPTION;
-        ((InstanceKlass*)k)->set_init_state(InstanceKlass::fully_initialized);
+        ik->set_init_state(InstanceKlass::fully_initialized);
       } else {
         return;
       }
@@ -842,7 +842,7 @@
       } else if (field_signature[0] == 'L') {
         Symbol* klass_name = SymbolTable::lookup(field_signature, (int)strlen(field_signature), CHECK);
         KlassHandle kelem = resolve_klass(field_signature, CHECK);
-        oop value = ((InstanceKlass*)kelem())->allocate_instance(CHECK);
+        oop value = InstanceKlass::cast(kelem())->allocate_instance(CHECK);
         java_mirror->obj_field_put(fd.offset(), value);
       } else {
         report_error("unhandled staticfield");
--- a/hotspot/src/share/vm/ci/ciSignature.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciSignature.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,7 @@
 
 // ------------------------------------------------------------------
 // ciSignature::ciSignature
-ciSignature::ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* symbol) {
+ciSignature::ciSignature(ciKlass* accessing_klass, const constantPoolHandle& cpool, ciSymbol* symbol) {
   ASSERT_IN_VM;
   EXCEPTION_CONTEXT;
   _accessing_klass = accessing_klass;
--- a/hotspot/src/share/vm/ci/ciSignature.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciSignature.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@
   friend class ciBytecodeStream;
   friend class ciObjectFactory;
 
-  ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* signature);
+  ciSignature(ciKlass* accessing_klass, const constantPoolHandle& cpool, ciSymbol* signature);
   ciSignature(ciKlass* accessing_klass,                           ciSymbol* signature, ciMethodType* method_type);
 
   void get_all_klasses();
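ciSignature is one of many places in this changeset where handle parameters switch from pass-by-value (constantPoolHandle cpool) to pass-by-const-reference (const constantPoolHandle& cpool); the same change is applied to methodHandle parameters later in the diff. Handles are small wrappers, but copying one on every call still runs its constructor and destructor; taking a const reference avoids the copy without changing call sites. A standalone sketch with a hypothetical CountingHandle type that only counts copies:

    #include <cstdio>

    // Hypothetical handle-like wrapper that counts how often it is copied.
    struct CountingHandle {
      static int copies;
      void* obj;
      explicit CountingHandle(void* o = nullptr) : obj(o) {}
      CountingHandle(const CountingHandle& other) : obj(other.obj) { ++copies; }
    };
    int CountingHandle::copies = 0;

    // Pass by value: every call copy-constructs the handle.
    void by_value(CountingHandle h) { (void)h; }

    // Pass by const reference: no copy, and the callee still cannot rebind the handle.
    void by_const_ref(const CountingHandle& h) { (void)h; }

    int main() {
      CountingHandle h(nullptr);
      for (int i = 0; i < 1000; i++) by_value(h);
      printf("copies after by_value:     %d\n", CountingHandle::copies);  // 1000
      for (int i = 0; i < 1000; i++) by_const_ref(h);
      printf("copies after by_const_ref: %d\n", CountingHandle::copies);  // still 1000
      return 0;
    }
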
--- a/hotspot/src/share/vm/ci/ciType.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciType.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
 }
 
 ciType::ciType(KlassHandle k) : ciMetadata(k()) {
-  _basic_type = k()->oop_is_array() ? T_ARRAY : T_OBJECT;
+  _basic_type = k()->is_array_klass() ? T_ARRAY : T_OBJECT;
 }
 
 
--- a/hotspot/src/share/vm/ci/ciTypeArrayKlass.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciTypeArrayKlass.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 // ------------------------------------------------------------------
 // ciTypeArrayKlass::ciTypeArrayKlass
 ciTypeArrayKlass::ciTypeArrayKlass(KlassHandle h_k) : ciArrayKlass(h_k) {
-  assert(get_Klass()->oop_is_typeArray(), "wrong type");
+  assert(get_Klass()->is_typeArray_klass(), "wrong type");
   assert(element_type() == get_TypeArrayKlass()->element_type(), "");
 }
 
--- a/hotspot/src/share/vm/classfile/bytecodeAssembler.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/bytecodeAssembler.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -140,7 +140,7 @@
   BytecodeCPEntry const& at(u2 index) const { return _entries.at(index); }
 
   InstanceKlass* pool_holder() const {
-    return InstanceKlass::cast(_orig->pool_holder());
+    return _orig->pool_holder();
   }
 
   u2 utf8(Symbol* sym) {
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -315,6 +315,7 @@
     return NULL;
 }
 
+#ifdef ASSERT
 PRAGMA_DIAG_PUSH
 PRAGMA_FORMAT_NONLITERAL_IGNORED
 void ClassFileParser::report_assert_property_failure(const char* msg, TRAPS) {
@@ -327,6 +328,7 @@
   fatal(msg, index, _class_name->as_C_string());
 }
 PRAGMA_DIAG_POP
+#endif
 
 constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
   ClassFileStream* cfs = stream();
@@ -646,7 +648,7 @@
 }
 
 
-void ClassFileParser::patch_constant_pool(constantPoolHandle cp, int index, Handle patch, TRAPS) {
+void ClassFileParser::patch_constant_pool(const constantPoolHandle& cp, int index, Handle patch, TRAPS) {
   BasicType patch_type = T_VOID;
 
   switch (cp->tag_at(index).value()) {
@@ -822,7 +824,7 @@
       debug_only(No_Safepoint_Verifier nsv;)
       for (index = 0; index < length; index++) {
         Klass* k = _local_interfaces->at(index);
-        Symbol* name = InstanceKlass::cast(k)->name();
+        Symbol* name = k->name();
         // If no duplicates, add (name, NULL) in hashtable interface_names.
         if (!put_after_lookup(name, NULL, interface_names)) {
           dup = true;
@@ -3175,8 +3177,9 @@
     bool is_array = false;
     if (_cp->tag_at(super_class_index).is_klass()) {
       super_klass = instanceKlassHandle(THREAD, _cp->resolved_klass_at(super_class_index));
-      if (_need_verify)
-        is_array = super_klass->oop_is_array();
+      if (_need_verify) {
+        is_array = super_klass->is_array_klass();
+      }
     } else if (_need_verify) {
       is_array = (_cp->klass_name_at(super_class_index)->byte_at(0) == JVM_SIGNATURE_ARRAY);
     }
@@ -3211,19 +3214,19 @@
 
   // Field size and offset computation
   int nonstatic_field_size = _super_klass() == NULL ? 0 : _super_klass()->nonstatic_field_size();
-  int next_static_oop_offset;
-  int next_static_double_offset;
-  int next_static_word_offset;
-  int next_static_short_offset;
-  int next_static_byte_offset;
-  int next_nonstatic_oop_offset;
-  int next_nonstatic_double_offset;
-  int next_nonstatic_word_offset;
-  int next_nonstatic_short_offset;
-  int next_nonstatic_byte_offset;
-  int first_nonstatic_oop_offset;
-  int next_nonstatic_field_offset;
-  int next_nonstatic_padded_offset;
+  int next_static_oop_offset = 0;
+  int next_static_double_offset = 0;
+  int next_static_word_offset = 0;
+  int next_static_short_offset = 0;
+  int next_static_byte_offset = 0;
+  int next_nonstatic_oop_offset = 0;
+  int next_nonstatic_double_offset = 0;
+  int next_nonstatic_word_offset = 0;
+  int next_nonstatic_short_offset = 0;
+  int next_nonstatic_byte_offset = 0;
+  int first_nonstatic_oop_offset = 0;
+  int next_nonstatic_field_offset = 0;
+  int next_nonstatic_padded_offset = 0;
 
   // Count the contended fields by type.
   //
@@ -3376,14 +3379,14 @@
     ShouldNotReachHere();
   }
 
-  int nonstatic_oop_space_count   = 0;
-  int nonstatic_word_space_count  = 0;
-  int nonstatic_short_space_count = 0;
-  int nonstatic_byte_space_count  = 0;
-  int nonstatic_oop_space_offset;
-  int nonstatic_word_space_offset;
-  int nonstatic_short_space_offset;
-  int nonstatic_byte_space_offset;
+  int nonstatic_oop_space_count    = 0;
+  int nonstatic_word_space_count   = 0;
+  int nonstatic_short_space_count  = 0;
+  int nonstatic_byte_space_count   = 0;
+  int nonstatic_oop_space_offset   = 0;
+  int nonstatic_word_space_offset  = 0;
+  int nonstatic_short_space_offset = 0;
+  int nonstatic_byte_space_offset  = 0;
 
   // Try to squeeze some of the fields into the gaps due to
   // long/double alignment.
@@ -3455,7 +3458,7 @@
     // contended instance fields are handled below
     if (fs.is_contended() && !fs.access_flags().is_static()) continue;
 
-    int real_offset;
+    int real_offset = 0;
     FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
 
     // pack the rest of the fields
@@ -3589,7 +3592,7 @@
         // handle statics below
         if (fs.access_flags().is_static()) continue;
 
-        int real_offset;
+        int real_offset = 0;
         FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
 
         switch (atype) {
@@ -4111,7 +4114,7 @@
     this_klass->set_should_verify_class(verify);
     jint lh = Klass::instance_layout_helper(info.instance_size, false);
     this_klass->set_layout_helper(lh);
-    assert(this_klass->oop_is_instance(), "layout is correct");
+    assert(this_klass->is_instance_klass(), "layout is correct");
     assert(this_klass->size_helper() == info.instance_size, "correct size_helper");
     // Not yet: supers are done below to support the new subtype-checking fields
     //this_klass->set_super(super_klass());
@@ -4315,13 +4318,13 @@
         if (caller != NULL) {
           tty->print("[Loaded %s by instance of %s]\n",
                      this_klass->external_name(),
-                     InstanceKlass::cast(caller)->external_name());
+                     caller->external_name());
         } else {
           tty->print("[Loaded %s]\n", this_klass->external_name());
         }
       } else {
         tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
-                   InstanceKlass::cast(class_loader->klass())->external_name());
+                   class_loader->klass()->external_name());
       }
     }
 
@@ -4330,7 +4333,7 @@
       // print out the superclass.
       const char * from = this_klass()->external_name();
       if (this_klass->java_super() != NULL) {
-        tty->print("RESOLVE %s %s (super)\n", from, InstanceKlass::cast(this_klass->java_super())->external_name());
+        tty->print("RESOLVE %s %s (super)\n", from, this_klass->java_super()->external_name());
       }
       // print out each of the interface classes referred to by this class.
       Array<Klass*>* local_interfaces = this_klass->local_interfaces();
@@ -4338,8 +4341,7 @@
         int length = local_interfaces->length();
         for (int i = 0; i < length; i++) {
           Klass* k = local_interfaces->at(i);
-          InstanceKlass* to_class = InstanceKlass::cast(k);
-          const char * to = to_class->external_name();
+          const char * to = k->external_name();
           tty->print("RESOLVE %s %s (interface)\n", from, to);
         }
       }
@@ -4411,7 +4413,7 @@
 
 void ClassFileParser::print_field_layout(Symbol* name,
                                          Array<u2>* fields,
-                                         constantPoolHandle cp,
+                                         const constantPoolHandle& cp,
                                          int instance_size,
                                          int instance_fields_start,
                                          int instance_fields_end,
@@ -4687,7 +4689,7 @@
       vmSymbols::java_lang_IllegalAccessError(),
       "class %s cannot access its superclass %s",
       this_klass->external_name(),
-      InstanceKlass::cast(super)->external_name()
+      super->external_name()
     );
     return;
   }
@@ -4707,7 +4709,7 @@
         vmSymbols::java_lang_IllegalAccessError(),
         "class %s cannot access its superinterface %s",
         this_klass->external_name(),
-        InstanceKlass::cast(k)->external_name()
+        k->external_name()
       );
       return;
     }
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -241,7 +241,7 @@
 
   void print_field_layout(Symbol* name,
                           Array<u2>* fields,
-                          constantPoolHandle cp,
+                          const constantPoolHandle& cp,
                           int instance_size,
                           int instance_fields_start,
                           int instance_fields_end,
@@ -319,8 +319,8 @@
     if (!b) { classfile_parse_error(msg, CHECK); }
   }
 
-  void report_assert_property_failure(const char* msg, TRAPS);
-  void report_assert_property_failure(const char* msg, int index, TRAPS);
+  void report_assert_property_failure(const char* msg, TRAPS) PRODUCT_RETURN;
+  void report_assert_property_failure(const char* msg, int index, TRAPS) PRODUCT_RETURN;
 
   inline void assert_property(bool b, const char* msg, TRAPS) {
 #ifdef ASSERT
@@ -403,7 +403,7 @@
     assert(!has_cp_patch_at(index), "");
     return patch;
   }
-  void patch_constant_pool(constantPoolHandle cp, int index, Handle patch, TRAPS);
+  void patch_constant_pool(const constantPoolHandle& cp, int index, Handle patch, TRAPS);
 
   // Wrapper for constantTag.is_klass_[or_]reference.
   // In older versions of the VM, Klass*s cannot sneak into early phases of
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -140,7 +140,7 @@
 
 void ClassLoaderData::methods_do(void f(Method*)) {
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       InstanceKlass::cast(k)->methods_do(f);
     }
   }
@@ -151,7 +151,7 @@
   MutexLockerEx ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     // Do not filter ArrayKlass oops here...
-    if (k->oop_is_array() || (k->oop_is_instance() && InstanceKlass::cast(k)->is_loaded())) {
+    if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
       klass_closure->do_klass(k);
     }
   }
@@ -159,7 +159,7 @@
 
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       f(InstanceKlass::cast(k));
     }
     assert(k != k->next_link(), "no loops!");
--- a/hotspot/src/share/vm/classfile/compactHashtable.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/compactHashtable.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -422,7 +422,7 @@
 
 int HashtableTextDump::scan_string_prefix() {
   // Expect /[0-9]+: /
-  int utf8_length;
+  int utf8_length = 0;
   get_num(':', &utf8_length);
   if (*_p != ' ') {
     corrupted(_p, "Wrong prefix format for string");
@@ -433,13 +433,13 @@
 
 int HashtableTextDump::scan_symbol_prefix() {
   // Expect /[0-9]+ (-|)[0-9]+: /
-  int utf8_length;
+  int utf8_length = 0;
   get_num(' ', &utf8_length);
-    if (*_p == '-') {
-     _p++;
+  if (*_p == '-') {
+    _p++;
   }
   int ref_num;
-  (void)get_num(':', &ref_num);
+  get_num(':', &ref_num);
   if (*_p != ' ') {
     corrupted(_p, "Wrong prefix format for symbol");
   }
--- a/hotspot/src/share/vm/classfile/dictionary.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
   DictionaryEntry* entry = (DictionaryEntry*)Hashtable<Klass*, mtClass>::new_entry(hash, klass);
   entry->set_loader_data(loader_data);
   entry->set_pd_set(NULL);
-  assert(klass->oop_is_instance(), "Must be");
+  assert(klass->is_instance_klass(), "Must be");
   return entry;
 }
 
@@ -78,7 +78,7 @@
 
 bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
 #ifdef ASSERT
-  if (protection_domain == InstanceKlass::cast(klass())->protection_domain()) {
+  if (protection_domain == klass()->protection_domain()) {
     // Ensure this doesn't show up in the pd_set (invariant)
     bool in_pd_set = false;
     for (ProtectionDomainEntry* current = _pd_set;
@@ -96,7 +96,7 @@
   }
 #endif /* ASSERT */
 
-  if (protection_domain == InstanceKlass::cast(klass())->protection_domain()) {
+  if (protection_domain == klass()->protection_domain()) {
     // Succeeds trivially
     return true;
   }
@@ -275,7 +275,7 @@
                           probe != NULL;
                           probe = probe->next()) {
       Klass* k = probe->klass();
-      if (probe->loader_data() == InstanceKlass::cast(k)->class_loader_data()) {
+      if (probe->loader_data() == k->class_loader_data()) {
         f(k);
       }
     }
@@ -290,7 +290,7 @@
                           probe != NULL;
                           probe = probe->next()) {
       Klass* k = probe->klass();
-      if (probe->loader_data() == InstanceKlass::cast(k)->class_loader_data()) {
+      if (probe->loader_data() == k->class_loader_data()) {
         f(k, CHECK);
       }
     }
@@ -322,7 +322,7 @@
                           probe != NULL;
                           probe = probe->next()) {
       Klass* k = probe->klass();
-      if (probe->loader_data() == InstanceKlass::cast(k)->class_loader_data()) {
+      if (probe->loader_data() == k->class_loader_data()) {
         // only take klass if we have the entry with the defining class loader
         InstanceKlass::cast(k)->methods_do(f);
       }
@@ -476,7 +476,7 @@
     DictionaryEntry* p = master_list;
     master_list = master_list->next();
     p->set_next(NULL);
-    Symbol* class_name = InstanceKlass::cast((Klass*)(p->klass()))->name();
+    Symbol* class_name = p->klass()->name();
     // Since the null class loader data isn't copied to the CDS archive,
     // compute the hash with NULL for loader data.
     unsigned int hash = compute_hash(class_name, NULL);
@@ -723,7 +723,7 @@
       Klass* e = probe->klass();
       ClassLoaderData* loader_data =  probe->loader_data();
       bool is_defining_class =
-         (loader_data == InstanceKlass::cast(e)->class_loader_data());
+         (loader_data == e->class_loader_data());
       tty->print("%s%s", ((!details) || is_defining_class) ? " " : "^",
                    e->external_name());
 
@@ -756,7 +756,7 @@
                           probe = probe->next()) {
       Klass* e = probe->klass();
       ClassLoaderData* loader_data = probe->loader_data();
-      guarantee(e->oop_is_instance(),
+      guarantee(e->is_instance_klass(),
                               "Verify of system dictionary failed");
       // class loader must be present;  a null class loader is the
      // bootstrap loader
--- a/hotspot/src/share/vm/classfile/dictionary.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/dictionary.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -323,8 +323,7 @@
 
   bool equals(Symbol* class_name, ClassLoaderData* loader_data) const {
     Klass* klass = (Klass*)literal();
-    return (InstanceKlass::cast(klass)->name() == class_name &&
-            _loader_data == loader_data);
+    return (klass->name() == class_name && _loader_data == loader_data);
   }
 
   void print() {
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -114,17 +114,17 @@
 // Helpful routine for computing field offsets at run time rather than hardcoding them
 static void
 compute_offset(int &dest_offset,
-               Klass* klass_oop, Symbol* name_symbol, Symbol* signature_symbol,
+               Klass* klass, Symbol* name_symbol, Symbol* signature_symbol,
                bool is_static = false, bool allow_super = false) {
   fieldDescriptor fd;
-  InstanceKlass* ik = InstanceKlass::cast(klass_oop);
+  InstanceKlass* ik = InstanceKlass::cast(klass);
   if (!find_field(ik, name_symbol, signature_symbol, &fd, is_static, allow_super)) {
     ResourceMark rm;
     tty->print_cr("Invalid layout of %s at %s", ik->external_name(), name_symbol->as_C_string());
 #ifndef PRODUCT
-    klass_oop->print();
+    ik->print();
     tty->print_cr("all fields:");
-    for (AllFieldStream fs(InstanceKlass::cast(klass_oop)); !fs.done(); fs.next()) {
+    for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
       tty->print_cr("  name: %s, sig: %s, flags: %08x", fs.name()->as_C_string(), fs.signature()->as_C_string(), fs.access_flags().as_int());
     }
 #endif //PRODUCT
@@ -136,10 +136,10 @@
 // Same as above but for "optional" offsets that might not be present in certain JDK versions
 static void
 compute_optional_offset(int& dest_offset,
-                        Klass* klass_oop, Symbol* name_symbol, Symbol* signature_symbol,
+                        Klass* klass, Symbol* name_symbol, Symbol* signature_symbol,
                         bool allow_super = false) {
   fieldDescriptor fd;
-  InstanceKlass* ik = InstanceKlass::cast(klass_oop);
+  InstanceKlass* ik = InstanceKlass::cast(klass);
   if (find_field(ik, name_symbol, signature_symbol, &fd, allow_super)) {
     dest_offset = fd.offset();
   }
@@ -174,7 +174,7 @@
   // Create the String object first, so there's a chance that the String
   // and the char array it points to end up in the same cache line.
   oop obj;
-  obj = InstanceKlass::cast(SystemDictionary::String_klass())->allocate_instance(CHECK_NH);
+  obj = SystemDictionary::String_klass()->allocate_instance(CHECK_NH);
 
   // Create the char array.  The String object must be handlized here
   // because GC can happen as a result of the allocation attempt.
@@ -555,7 +555,7 @@
 
   // If the offset was read from the shared archive, it was fixed up already
   if (!k->is_shared()) {
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       // During bootstrap, java.lang.Class wasn't loaded so static field
       // offsets were computed without the size added it.  Go back and
       // update all the static field offsets to included the size.
@@ -613,13 +613,13 @@
     java_lang_Class::set_static_oop_field_count(mirror(), mk->compute_static_oop_field_count(mirror()));
 
     // It might also have a component mirror.  This mirror must already exist.
-    if (k->oop_is_array()) {
+    if (k->is_array_klass()) {
       Handle comp_mirror;
-      if (k->oop_is_typeArray()) {
+      if (k->is_typeArray_klass()) {
         BasicType type = TypeArrayKlass::cast(k())->element_type();
         comp_mirror = Universe::java_mirror(type);
       } else {
-        assert(k->oop_is_objArray(), "Must be");
+        assert(k->is_objArray_klass(), "Must be");
         Klass* element_klass = ObjArrayKlass::cast(k())->element_klass();
         assert(element_klass != NULL, "Must have an element klass");
         comp_mirror = element_klass->java_mirror();
@@ -631,7 +631,7 @@
       set_component_mirror(mirror(), comp_mirror());
       set_array_klass(comp_mirror(), k());
     } else {
-      assert(k->oop_is_instance(), "Must be");
+      assert(k->is_instance_klass(), "Must be");
 
       initialize_mirror_fields(k, mirror, protection_domain, THREAD);
       if (HAS_PENDING_EXCEPTION) {
@@ -770,7 +770,7 @@
     name = vmSymbols::type_signature(primitive_type(java_class));
   } else {
     Klass* k = as_Klass(java_class);
-    is_instance = k->oop_is_instance();
+    is_instance = k->is_instance_klass();
     name = k->name();
   }
   if (name == NULL) {
@@ -793,7 +793,7 @@
     name->increment_refcount();
   } else {
     Klass* k = as_Klass(java_class);
-    if (!k->oop_is_instance()) {
+    if (!k->is_instance_klass()) {
       name = k->name();
       name->increment_refcount();
     } else {
@@ -829,13 +829,13 @@
 
 Klass* java_lang_Class::array_klass(oop java_class) {
   Klass* k = ((Klass*)java_class->metadata_field(_array_klass_offset));
-  assert(k == NULL || k->is_klass() && k->oop_is_array(), "should be array klass");
+  assert(k == NULL || k->is_klass() && k->is_array_klass(), "should be array klass");
   return k;
 }
 
 
 void java_lang_Class::set_array_klass(oop java_class, Klass* klass) {
-  assert(klass->is_klass() && klass->oop_is_array(), "should be array klass");
+  assert(klass->is_klass() && klass->is_array_klass(), "should be array klass");
   java_class->metadata_field_put(_array_klass_offset, klass);
 }
 
@@ -1236,7 +1236,7 @@
 }
 
 oop java_lang_Throwable::unassigned_stacktrace() {
-  InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::Throwable_klass());
+  InstanceKlass* ik = SystemDictionary::Throwable_klass();
   address addr = ik->static_field_addr(static_unassigned_stacktrace_offset);
   if (UseCompressedOops) {
     return oopDesc::load_decode_heap_oop((narrowOop *)addr);
@@ -1293,7 +1293,7 @@
   ResourceMark rm;
   Klass* k = throwable->klass();
   assert(k != NULL, "just checking");
-  st->print("%s", InstanceKlass::cast(k)->external_name());
+  st->print("%s", k->external_name());
   oop msg = message(throwable);
   if (msg != NULL) {
     st->print(": %s", java_lang_String::as_utf8_string(msg));
@@ -1305,7 +1305,7 @@
   ResourceMark rm;
   Klass* k = throwable->klass();
   assert(k != NULL, "just checking");
-  st->print("%s", InstanceKlass::cast(k)->external_name());
+  st->print("%s", k->external_name());
   oop msg = message(throwable);
   if (msg != NULL) {
     st->print(": %s", java_lang_String::as_utf8_string(msg));
@@ -1561,7 +1561,7 @@
   st->print_cr("%s", buf);
 }
 
-void java_lang_Throwable::print_stack_element(outputStream *st, methodHandle method, int bci) {
+void java_lang_Throwable::print_stack_element(outputStream *st, const methodHandle& method, int bci) {
   Handle mirror = method->method_holder()->java_mirror();
   int method_id = method->orig_method_idnum();
   int version = method->constants()->version();
@@ -1632,7 +1632,7 @@
   }
 }
 
-void java_lang_Throwable::fill_in_stack_trace(Handle throwable, methodHandle method, TRAPS) {
+void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHandle& method, TRAPS) {
   if (!StackTraceInThrowable) return;
   ResourceMark rm(THREAD);
 
@@ -1763,7 +1763,7 @@
   set_backtrace(throwable(), bt.backtrace());
 }
 
-void java_lang_Throwable::fill_in_stack_trace(Handle throwable, methodHandle method) {
+void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHandle& method) {
   // No-op if stack trace is disabled
   if (!StackTraceInThrowable) {
     return;
@@ -1945,7 +1945,7 @@
   return element();
 }
 
-oop java_lang_StackTraceElement::create(methodHandle method, int bci, TRAPS) {
+oop java_lang_StackTraceElement::create(const methodHandle& method, int bci, TRAPS) {
   Handle mirror (THREAD, method->method_holder()->java_mirror());
   int method_id = method->orig_method_idnum();
   int cpref = method->name_index();
@@ -2506,7 +2506,7 @@
 
   oop mirror = reflect->obj_field(_oop_offset);
   Klass* k = java_lang_Class::as_Klass(mirror);
-  assert(k->oop_is_instance(), "Must be");
+  assert(k->is_instance_klass(), "Must be");
 
   // Get the constant pool back from the klass.  Since class redefinition
   // merges the new constant pool into the old, this is essentially the
@@ -2663,13 +2663,13 @@
 
 // Support for java_lang_ref_Reference
 HeapWord *java_lang_ref_Reference::pending_list_lock_addr() {
-  InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::Reference_klass());
+  InstanceKlass* ik = SystemDictionary::Reference_klass();
   address addr = ik->static_field_addr(static_lock_offset);
   return (HeapWord*) addr;
 }
 
 oop java_lang_ref_Reference::pending_list_lock() {
-  InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::Reference_klass());
+  InstanceKlass* ik = SystemDictionary::Reference_klass();
   address addr = ik->static_field_addr(static_lock_offset);
   if (UseCompressedOops) {
     return oopDesc::load_decode_heap_oop((narrowOop *)addr);
@@ -2679,7 +2679,7 @@
 }
 
 HeapWord *java_lang_ref_Reference::pending_list_addr() {
-  InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::Reference_klass());
+  InstanceKlass* ik = SystemDictionary::Reference_klass();
   address addr = ik->static_field_addr(static_pending_offset);
   // XXX This might not be HeapWord aligned, almost rather be char *.
   return (HeapWord*)addr;
@@ -2702,13 +2702,13 @@
 }
 
 jlong java_lang_ref_SoftReference::clock() {
-  InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::SoftReference_klass());
+  InstanceKlass* ik = SystemDictionary::SoftReference_klass();
   jlong* offset = (jlong*)ik->static_field_addr(static_clock_offset);
   return *offset;
 }
 
 void java_lang_ref_SoftReference::set_clock(jlong value) {
-  InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::SoftReference_klass());
+  InstanceKlass* ik = SystemDictionary::SoftReference_klass();
   jlong* offset = (jlong*)ik->static_field_addr(static_clock_offset);
   *offset = value;
 }
@@ -3033,7 +3033,7 @@
 void java_security_AccessControlContext::compute_offsets() {
   assert(_isPrivileged_offset == 0, "offsets should be initialized only once");
   fieldDescriptor fd;
-  InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::AccessControlContext_klass());
+  InstanceKlass* ik = SystemDictionary::AccessControlContext_klass();
 
   if (!ik->find_local_field(vmSymbols::context_name(), vmSymbols::protectiondomain_signature(), &fd)) {
     fatal("Invalid layout of java.security.AccessControlContext");
@@ -3066,9 +3066,9 @@
 oop java_security_AccessControlContext::create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS) {
   assert(_isPrivileged_offset != 0, "offsets should have been initialized");
   // Ensure klass is initialized
-  InstanceKlass::cast(SystemDictionary::AccessControlContext_klass())->initialize(CHECK_0);
+  SystemDictionary::AccessControlContext_klass()->initialize(CHECK_0);
   // Allocate result
-  oop result = InstanceKlass::cast(SystemDictionary::AccessControlContext_klass())->allocate_instance(CHECK_0);
+  oop result = SystemDictionary::AccessControlContext_klass()->allocate_instance(CHECK_0);
   // Fill in values
   result->obj_field_put(_context_offset, context());
   result->obj_field_put(_privilegedContext_offset, privileged_context());
@@ -3190,7 +3190,7 @@
 
 
 bool java_lang_System::has_security_manager() {
-  InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::System_klass());
+  InstanceKlass* ik = SystemDictionary::System_klass();
   address addr = ik->static_field_addr(static_security_offset);
   if (UseCompressedOops) {
     return oopDesc::load_decode_heap_oop((narrowOop *)addr) != NULL;
@@ -3630,8 +3630,8 @@
 #endif // PRODUCT
 
 int InjectedField::compute_offset() {
-  Klass* klass_oop = klass();
-  for (AllFieldStream fs(InstanceKlass::cast(klass_oop)); !fs.done(); fs.next()) {
+  InstanceKlass* ik = InstanceKlass::cast(klass());
+  for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
     if (!may_be_java && !fs.access_flags().is_internal()) {
       // Only look at injected fields
       continue;
@@ -3641,11 +3641,11 @@
     }
   }
   ResourceMark rm;
-  tty->print_cr("Invalid layout of %s at %s/%s%s", InstanceKlass::cast(klass_oop)->external_name(), name()->as_C_string(), signature()->as_C_string(), may_be_java ? " (may_be_java)" : "");
+  tty->print_cr("Invalid layout of %s at %s/%s%s", ik->external_name(), name()->as_C_string(), signature()->as_C_string(), may_be_java ? " (may_be_java)" : "");
 #ifndef PRODUCT
-  klass_oop->print();
+  ik->print();
   tty->print_cr("all fields:");
-  for (AllFieldStream fs(InstanceKlass::cast(klass_oop)); !fs.done(); fs.next()) {
+  for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
     tty->print_cr("  name: %s, sig: %s, flags: %08x", fs.name()->as_C_string(), fs.signature()->as_C_string(), fs.access_flags().as_int());
   }
 #endif //PRODUCT
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -532,7 +532,7 @@
   static Symbol* detail_message(oop throwable);
   static void print_stack_element(outputStream *st, Handle mirror, int method,
                                   int version, int bci, int cpref);
-  static void print_stack_element(outputStream *st, methodHandle method, int bci);
+  static void print_stack_element(outputStream *st, const methodHandle& method, int bci);
   static void print_stack_usage(Handle stream);
 
   // Allocate space for backtrace (created but stack trace not filled in)
@@ -540,8 +540,8 @@
   // Fill in current stack trace for throwable with preallocated backtrace (no GC)
   static void fill_in_stack_trace_of_preallocated_backtrace(Handle throwable);
   // Fill in current stack trace, can cause GC
-  static void fill_in_stack_trace(Handle throwable, methodHandle method, TRAPS);
-  static void fill_in_stack_trace(Handle throwable, methodHandle method = methodHandle());
+  static void fill_in_stack_trace(Handle throwable, const methodHandle& method, TRAPS);
+  static void fill_in_stack_trace(Handle throwable, const methodHandle& method = methodHandle());
   // Programmatic access to stack trace
   static oop  get_stack_trace_element(oop throwable, int index, TRAPS);
   static int  get_stack_trace_depth(oop throwable, TRAPS);
@@ -1347,7 +1347,7 @@
 
   // Create an instance of StackTraceElement
   static oop create(Handle mirror, int method, int version, int bci, int cpref, TRAPS);
-  static oop create(methodHandle method, int bci, TRAPS);
+  static oop create(const methodHandle& method, int bci, TRAPS);
 
   // Debugging
   friend class JavaClasses;
--- a/hotspot/src/share/vm/classfile/loaderConstraints.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/loaderConstraints.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -320,7 +320,7 @@
                                                        Handle loader) {
   LoaderConstraintEntry *p = *(find_loader_constraint(name, loader));
   if (p != NULL && p->klass() != NULL) {
-    if (p->klass()->oop_is_instance() && !InstanceKlass::cast(p->klass())->is_loaded()) {
+    if (p->klass()->is_instance_klass() && !InstanceKlass::cast(p->klass())->is_loaded()) {
       // Only return fully loaded classes.  Classes found through the
       // constraints might still be in the process of loading.
       return NULL;
--- a/hotspot/src/share/vm/classfile/placeholders.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/placeholders.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -244,7 +244,7 @@
   guarantee(loader_data()->class_loader() == NULL || loader_data()->class_loader()->is_instance(),
             "checking type of _loader");
   guarantee(instance_klass() == NULL
-            || instance_klass()->oop_is_instance(),
+            || instance_klass()->is_instance_klass(),
             "checking type of instance_klass result");
 }
 
--- a/hotspot/src/share/vm/classfile/placeholders.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/placeholders.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -220,7 +220,7 @@
   }
 
   SeenThread* actionToQueue(PlaceholderTable::classloadAction action) {
-    SeenThread* queuehead;
+    SeenThread* queuehead = NULL;
     switch (action) {
       case PlaceholderTable::LOAD_INSTANCE:
          queuehead = _loadInstanceThreadQ;
--- a/hotspot/src/share/vm/classfile/resolutionErrors.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/resolutionErrors.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
 
 // add new entry to the table
 void ResolutionErrorTable::add_entry(int index, unsigned int hash,
-                                     constantPoolHandle pool, int cp_index,
+                                     const constantPoolHandle& pool, int cp_index,
                                      Symbol* error, Symbol* message)
 {
   assert_locked_or_safepoint(SystemDictionary_lock);
@@ -44,7 +44,7 @@
 
 // find entry in the table
 ResolutionErrorEntry* ResolutionErrorTable::find_entry(int index, unsigned int hash,
-                                                       constantPoolHandle pool, int cp_index)
+                                                       const constantPoolHandle& pool, int cp_index)
 {
   assert_locked_or_safepoint(SystemDictionary_lock);
 
--- a/hotspot/src/share/vm/classfile/resolutionErrors.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/resolutionErrors.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,15 +56,15 @@
   }
 
   void add_entry(int index, unsigned int hash,
-                 constantPoolHandle pool, int which, Symbol* error, Symbol* message);
+                 const constantPoolHandle& pool, int which, Symbol* error, Symbol* message);
 
 
   // find error given the constant pool and constant pool index
   ResolutionErrorEntry* find_entry(int index, unsigned int hash,
-                                   constantPoolHandle pool, int cp_index);
+                                   const constantPoolHandle& pool, int cp_index);
 
 
-  unsigned int compute_hash(constantPoolHandle pool, int cp_index) {
+  unsigned int compute_hash(const constantPoolHandle& pool, int cp_index) {
     return (unsigned int) pool->identity_hash() + cp_index;
   }
 
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -371,7 +371,7 @@
   }
 }
 
-void SymbolTable::add(ClassLoaderData* loader_data, constantPoolHandle cp,
+void SymbolTable::add(ClassLoaderData* loader_data, const constantPoolHandle& cp,
                       int names_count,
                       const char** names, int* lengths, int* cp_indices,
                       unsigned int* hashValues, TRAPS) {
@@ -452,7 +452,7 @@
 
 // This version of basic_add adds symbols in batch from the constant pool
 // parsing.
-bool SymbolTable::basic_add(ClassLoaderData* loader_data, constantPoolHandle cp,
+bool SymbolTable::basic_add(ClassLoaderData* loader_data, const constantPoolHandle& cp,
                             int names_count,
                             const char** names, int* lengths,
                             int* cp_indices, unsigned int* hashValues,
--- a/hotspot/src/share/vm/classfile/symbolTable.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,12 +100,12 @@
   Symbol* basic_add(int index, u1* name, int len, unsigned int hashValue,
                     bool c_heap, TRAPS);
   bool basic_add(ClassLoaderData* loader_data,
-                 constantPoolHandle cp, int names_count,
+                 const constantPoolHandle& cp, int names_count,
                  const char** names, int* lengths, int* cp_indices,
                  unsigned int* hashValues, TRAPS);
 
   static void new_symbols(ClassLoaderData* loader_data,
-                          constantPoolHandle cp, int names_count,
+                          const constantPoolHandle& cp, int names_count,
                           const char** name, int* lengths,
                           int* cp_indices, unsigned int* hashValues,
                           TRAPS) {
@@ -170,7 +170,7 @@
   static Symbol* lookup_only_unicode(const jchar* name, int len, unsigned int& hash);
 
   static void add(ClassLoaderData* loader_data,
-                  constantPoolHandle cp, int names_count,
+                  const constantPoolHandle& cp, int names_count,
                   const char** names, int* lengths, int* cp_indices,
                   unsigned int* hashValues, TRAPS);
 
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -88,10 +88,10 @@
 
 oop         SystemDictionary::_system_loader_lock_obj     =  NULL;
 
-Klass*      SystemDictionary::_well_known_klasses[SystemDictionary::WKID_LIMIT]
+InstanceKlass*      SystemDictionary::_well_known_klasses[SystemDictionary::WKID_LIMIT]
                                                           =  { NULL /*, NULL...*/ };
 
-Klass*      SystemDictionary::_box_klasses[T_VOID+1]      =  { NULL /*, NULL...*/ };
+InstanceKlass*      SystemDictionary::_box_klasses[T_VOID+1]      =  { NULL /*, NULL...*/ };
 
 oop         SystemDictionary::_java_system_loader         =  NULL;
 
@@ -99,7 +99,7 @@
 bool        SystemDictionary::_has_checkPackageAccess     =  false;
 
 // lazily initialized klass variables
-Klass* volatile SystemDictionary::_abstract_ownable_synchronizer_klass = NULL;
+InstanceKlass* volatile SystemDictionary::_abstract_ownable_synchronizer_klass = NULL;
 
 
 // ----------------------------------------------------------------------------
@@ -357,7 +357,7 @@
     // so we don't throw an exception here.
     // see: nsk redefclass014 & java.lang.instrument Instrument032
     if ((childk != NULL ) && (is_superclass) &&
-       ((quicksuperk = InstanceKlass::cast(childk)->super()) != NULL) &&
+       ((quicksuperk = childk->super()) != NULL) &&
 
          ((quicksuperk->name() == class_name) &&
             (quicksuperk->class_loader()  == class_loader()))) {
@@ -1257,8 +1257,7 @@
     }
 
     // notify a class loaded from shared object
-    ClassLoadingService::notify_class_loaded(InstanceKlass::cast(ik()),
-                                             true /* shared class */);
+    ClassLoadingService::notify_class_loaded(ik(), true /* shared class */);
   }
   return ik;
 }
@@ -1805,7 +1804,7 @@
     Klass* k = resolve_or_fail(vmSymbols::java_util_concurrent_locks_AbstractOwnableSynchronizer(), true, CHECK);
     // Force a fence to prevent any read before the write completes
     OrderAccess::fence();
-    _abstract_ownable_synchronizer_klass = k;
+    _abstract_ownable_synchronizer_klass = InstanceKlass::cast(k);
   }
 }
 
@@ -1846,14 +1845,16 @@
   int  info = wk_init_info[id - FIRST_WKID];
   int  sid  = (info >> CEIL_LG_OPTION_LIMIT);
   Symbol* symbol = vmSymbols::symbol_at((vmSymbols::SID)sid);
-  Klass**    klassp = &_well_known_klasses[id];
+  InstanceKlass** klassp = &_well_known_klasses[id];
   bool must_load = (init_opt < SystemDictionary::Opt);
   if ((*klassp) == NULL) {
+    Klass* k;
     if (must_load) {
-      (*klassp) = resolve_or_fail(symbol, true, CHECK_0); // load required class
+      k = resolve_or_fail(symbol, true, CHECK_0); // load required class
     } else {
-      (*klassp) = resolve_or_null(symbol,       CHECK_0); // load optional klass
+      k = resolve_or_null(symbol,       CHECK_0); // load optional klass
     }
+    (*klassp) = (k == NULL) ? NULL : InstanceKlass::cast(k);
   }
   return ((*klassp) != NULL);
 }
@@ -1966,7 +1967,8 @@
                                          instanceKlassHandle k,
                                          Handle class_loader, bool defining,
                                          TRAPS) {
-  const char *linkage_error = NULL;
+  const char *linkage_error1 = NULL;
+  const char *linkage_error2 = NULL;
   {
     Symbol*  name  = k->name();
     ClassLoaderData *loader_data = class_loader_data(class_loader);
@@ -1981,10 +1983,10 @@
       // system dictionary only holds instance classes, placeholders
       // also holds array classes
 
-      assert(check->oop_is_instance(), "noninstance in systemdictionary");
+      assert(check->is_instance_klass(), "noninstance in systemdictionary");
       if ((defining == true) || (k() != check)) {
-        linkage_error = "loader (instance of  %s): attempted  duplicate class "
-          "definition for name: \"%s\"";
+        linkage_error1 = "loader (instance of  ";
+        linkage_error2 = "): attempted  duplicate class definition for name: \"";
       } else {
         return;
       }
@@ -1995,10 +1997,10 @@
     assert(ph_check == NULL || ph_check == name, "invalid symbol");
 #endif
 
-    if (linkage_error == NULL) {
+    if (linkage_error1 == NULL) {
       if (constraints()->check_or_update(k, class_loader, name) == false) {
-        linkage_error = "loader constraint violation: loader (instance of %s)"
-          " previously initiated loading for a different type with name \"%s\"";
+        linkage_error1 = "loader constraint violation: loader (instance of ";
+        linkage_error2 = ") previously initiated loading for a different type with name \"";
       }
     }
   }
@@ -2006,14 +2008,14 @@
   // Throw error now if needed (cannot throw while holding
   // SystemDictionary_lock because of rank ordering)
 
-  if (linkage_error) {
+  if (linkage_error1) {
     ResourceMark rm(THREAD);
     const char* class_loader_name = loader_name(class_loader());
     char* type_name = k->name()->as_C_string();
-    size_t buflen = strlen(linkage_error) + strlen(class_loader_name) +
-      strlen(type_name);
+    size_t buflen = strlen(linkage_error1) + strlen(class_loader_name) +
+      strlen(linkage_error2) + strlen(type_name) + 2; // +2 for '"' and null byte.
     char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
-    jio_snprintf(buf, buflen, linkage_error, class_loader_name, type_name);
+    jio_snprintf(buf, buflen, "%s%s%s%s\"", linkage_error1, class_loader_name, linkage_error2, type_name);
     THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
   }
 }
@@ -2155,7 +2157,7 @@
 
 // Add entry to resolution error table to record the error when the first
 // attempt to resolve a reference to a class has failed.
-void SystemDictionary::add_resolution_error(constantPoolHandle pool, int which,
+void SystemDictionary::add_resolution_error(const constantPoolHandle& pool, int which,
                                             Symbol* error, Symbol* message) {
   unsigned int hash = resolution_errors()->compute_hash(pool, which);
   int index = resolution_errors()->hash_to_index(hash);
@@ -2171,7 +2173,7 @@
 }
 
 // Lookup resolution error table. Returns error if found, otherwise NULL.
-Symbol* SystemDictionary::find_resolution_error(constantPoolHandle pool, int which,
+Symbol* SystemDictionary::find_resolution_error(const constantPoolHandle& pool, int which,
                                                 Symbol** message) {
   unsigned int hash = resolution_errors()->compute_hash(pool, which);
   int index = resolution_errors()->hash_to_index(hash);
@@ -2387,13 +2389,13 @@
 // Out of an abundance of caution, we do not include any other classes, not even for packages like java.util.
 static bool is_always_visible_class(oop mirror) {
   Klass* klass = java_lang_Class::as_Klass(mirror);
-  if (klass->oop_is_objArray()) {
+  if (klass->is_objArray_klass()) {
     klass = ObjArrayKlass::cast(klass)->bottom_klass(); // check element type
   }
-  if (klass->oop_is_typeArray()) {
+  if (klass->is_typeArray_klass()) {
     return true; // primitive array
   }
-  assert(klass->oop_is_instance(), "%s", klass->external_name());
+  assert(klass->is_instance_klass(), "%s", klass->external_name());
   return klass->is_public() &&
          (InstanceKlass::cast(klass)->is_same_class_package(SystemDictionary::Object_klass()) ||       // java.lang
           InstanceKlass::cast(klass)->is_same_class_package(SystemDictionary::MethodHandle_klass()));  // java.lang.invoke
@@ -2457,9 +2459,9 @@
       Klass* sel_klass = java_lang_Class::as_Klass(mirror);
       mirror = NULL;  // safety
       // Emulate ConstantPool::verify_constant_pool_resolve.
-      if (sel_klass->oop_is_objArray())
+      if (sel_klass->is_objArray_klass())
         sel_klass = ObjArrayKlass::cast(sel_klass)->bottom_klass();
-      if (sel_klass->oop_is_instance()) {
+      if (sel_klass->is_instance_klass()) {
         KlassHandle sel_kh(THREAD, sel_klass);
         LinkResolver::check_klass_accessability(accessing_klass, sel_kh, CHECK_(empty));
       }
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -168,7 +168,7 @@
                                                                                                                          \
   do_klass(StringBuffer_klass,                          java_lang_StringBuffer,                    Pre                 ) \
   do_klass(StringBuilder_klass,                         java_lang_StringBuilder,                   Pre                 ) \
-  do_klass(misc_Unsafe_klass,                           sun_misc_Unsafe,                           Pre                 ) \
+  do_klass(internal_Unsafe_klass,                       jdk_internal_misc_Unsafe,                  Pre                 ) \
                                                                                                                          \
   /* support for CDS */                                                                                                  \
   do_klass(ByteArrayInputStream_klass,                  java_io_ByteArrayInputStream,              Pre                 ) \
@@ -403,15 +403,15 @@
   static void initialize(TRAPS);
 
   // Fast access to commonly used classes (preloaded)
-  static Klass* check_klass(Klass* k) {
+  static InstanceKlass* check_klass(InstanceKlass* k) {
     assert(k != NULL, "preloaded klass not initialized");
     return k;
   }
 
-  static Klass* check_klass_Pre(       Klass* k) { return check_klass(k); }
-  static Klass* check_klass_Opt(       Klass* k) { return k; }
+  static InstanceKlass* check_klass_Pre(InstanceKlass* k) { return check_klass(k); }
+  static InstanceKlass* check_klass_Opt(InstanceKlass* k) { return k; }
 
-  JVMCI_ONLY(static Klass* check_klass_Jvmci(Klass* k) { return k; })
+  JVMCI_ONLY(static InstanceKlass* check_klass_Jvmci(InstanceKlass* k) { return k; })
 
   static bool initialize_wk_klass(WKID id, int init_opt, TRAPS);
   static void initialize_wk_klasses_until(WKID limit_id, WKID &start_id, TRAPS);
@@ -422,19 +422,19 @@
 
 public:
   #define WK_KLASS_DECLARE(name, symbol, option) \
-    static Klass* name() { return check_klass_##option(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); } \
-    static Klass** name##_addr() {                                                                       \
+    static InstanceKlass* name() { return check_klass_##option(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); } \
+    static InstanceKlass** name##_addr() {                                                                       \
       return &SystemDictionary::_well_known_klasses[SystemDictionary::WK_KLASS_ENUM_NAME(name)];           \
     }
   WK_KLASSES_DO(WK_KLASS_DECLARE);
   #undef WK_KLASS_DECLARE
 
-  static Klass* well_known_klass(WKID id) {
+  static InstanceKlass* well_known_klass(WKID id) {
     assert(id >= (int)FIRST_WKID && id < (int)WKID_LIMIT, "oob");
     return _well_known_klasses[id];
   }
 
-  static Klass** well_known_klass_addr(WKID id) {
+  static InstanceKlass** well_known_klass_addr(WKID id) {
     assert(id >= (int)FIRST_WKID && id < (int)WKID_LIMIT, "oob");
     return &_well_known_klasses[id];
   }
@@ -442,7 +442,7 @@
   // Local definition for direct access to the private array:
   #define WK_KLASS(name) _well_known_klasses[SystemDictionary::WK_KLASS_ENUM_NAME(name)]
 
-  static Klass* box_klass(BasicType t) {
+  static InstanceKlass* box_klass(BasicType t) {
     assert((uint)t < T_VOID+1, "range check");
     return check_klass(_box_klasses[t]);
   }
@@ -450,7 +450,7 @@
 
   // methods returning lazily loaded klasses
   // The corresponding method to load the class must be called before calling them.
-  static Klass* abstract_ownable_synchronizer_klass() { return check_klass(_abstract_ownable_synchronizer_klass); }
+  static InstanceKlass* abstract_ownable_synchronizer_klass() { return check_klass(_abstract_ownable_synchronizer_klass); }
 
   static void load_abstract_ownable_synchronizer_klass(TRAPS);
 
@@ -547,10 +547,10 @@
 
   // Record the error when the first attempt to resolve a reference from a constant
   // pool entry to a class fails.
-  static void add_resolution_error(constantPoolHandle pool, int which, Symbol* error,
+  static void add_resolution_error(const constantPoolHandle& pool, int which, Symbol* error,
                                    Symbol* message);
   static void delete_resolution_error(ConstantPool* pool);
-  static Symbol* find_resolution_error(constantPoolHandle pool, int which,
+  static Symbol* find_resolution_error(const constantPoolHandle& pool, int which,
                                        Symbol** message);
 
  protected:
@@ -700,13 +700,13 @@
                                 TRAPS);
 
   // Variables holding commonly used klasses (preloaded)
-  static Klass* _well_known_klasses[];
+  static InstanceKlass* _well_known_klasses[];
 
   // Lazily loaded klasses
-  static Klass* volatile _abstract_ownable_synchronizer_klass;
+  static InstanceKlass* volatile _abstract_ownable_synchronizer_klass;
 
   // table of box klasses (int_klass, etc.)
-  static Klass* _box_klasses[T_VOID+1];
+  static InstanceKlass* _box_klasses[T_VOID+1];
 
   static oop  _java_system_loader;
 
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -184,7 +184,7 @@
     if (HAS_PENDING_EXCEPTION) {
       tty->print("Verification for %s has", klassName);
       tty->print_cr(" exception pending %s ",
-        InstanceKlass::cast(PENDING_EXCEPTION->klass())->external_name());
+        PENDING_EXCEPTION->klass()->external_name());
     } else if (exception_name != NULL) {
       tty->print_cr("Verification for %s failed", klassName);
     }
@@ -605,7 +605,7 @@
   }
 }
 
-void ClassVerifier::verify_method(methodHandle m, TRAPS) {
+void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
   HandleMark hm(THREAD);
   _method = m;   // initialize _method
   if (VerboseVerification) {
@@ -1901,7 +1901,7 @@
 }
 
 void ClassVerifier::verify_cp_index(
-    u2 bci, constantPoolHandle cp, int index, TRAPS) {
+    u2 bci, const constantPoolHandle& cp, int index, TRAPS) {
   int nconstants = cp->length();
   if ((index <= 0) || (index >= nconstants)) {
     verify_error(ErrorContext::bad_cp_index(bci, index),
@@ -1912,7 +1912,7 @@
 }
 
 void ClassVerifier::verify_cp_type(
-    u2 bci, int index, constantPoolHandle cp, unsigned int types, TRAPS) {
+    u2 bci, int index, const constantPoolHandle& cp, unsigned int types, TRAPS) {
 
   // In some situations, bytecode rewriting may occur while we're verifying.
   // In this case, a constant pool cache exists and some indices refer to that
@@ -1931,7 +1931,7 @@
 }
 
 void ClassVerifier::verify_cp_class_type(
-    u2 bci, int index, constantPoolHandle cp, TRAPS) {
+    u2 bci, int index, const constantPoolHandle& cp, TRAPS) {
   verify_cp_index(bci, cp, index, CHECK_VERIFY(this));
   constantTag tag = cp->tag_at(index);
   if (!tag.is_klass() && !tag.is_unresolved_klass()) {
@@ -2023,7 +2023,7 @@
 
 void ClassVerifier::verify_ldc(
     int opcode, u2 index, StackMapFrame* current_frame,
-    constantPoolHandle cp, u2 bci, TRAPS) {
+    const constantPoolHandle& cp, u2 bci, TRAPS) {
   verify_cp_index(bci, cp, index, CHECK_VERIFY(this));
   constantTag tag = cp->tag_at(index);
   unsigned int types;
@@ -2165,7 +2165,7 @@
 
 void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
                                               StackMapFrame* current_frame,
-                                              constantPoolHandle cp,
+                                              const constantPoolHandle& cp,
                                               bool allow_arrays,
                                               TRAPS) {
   u2 index = bcs->get_index_u2();
@@ -2477,7 +2477,7 @@
 void ClassVerifier::verify_invoke_init(
     RawBytecodeStream* bcs, u2 ref_class_index, VerificationType ref_class_type,
     StackMapFrame* current_frame, u4 code_length, bool in_try_block,
-    bool *this_uninit, constantPoolHandle cp, StackMapTable* stackmap_table,
+    bool *this_uninit, const constantPoolHandle& cp, StackMapTable* stackmap_table,
     TRAPS) {
   u2 bci = bcs->bci();
   VerificationType type = current_frame->pop_stack(
@@ -2613,7 +2613,7 @@
 void ClassVerifier::verify_invoke_instructions(
     RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
     bool in_try_block, bool *this_uninit, VerificationType return_type,
-    constantPoolHandle cp, StackMapTable* stackmap_table, TRAPS) {
+    const constantPoolHandle& cp, StackMapTable* stackmap_table, TRAPS) {
   // Make sure the constant pool item is the right type
   u2 index = bcs->get_index_u2();
   Bytecodes::Code opcode = bcs->raw_code();
@@ -2878,7 +2878,7 @@
 }
 
 void ClassVerifier::verify_anewarray(
-    u2 bci, u2 index, constantPoolHandle cp,
+    u2 bci, u2 index, const constantPoolHandle& cp,
     StackMapFrame* current_frame, TRAPS) {
   verify_cp_class_type(bci, index, cp, CHECK_VERIFY(this));
   current_frame->pop_stack(
--- a/hotspot/src/share/vm/classfile/verifier.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/verifier.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -262,14 +262,14 @@
 
   ErrorContext _error_context;  // contains information about an error
 
-  void verify_method(methodHandle method, TRAPS);
+  void verify_method(const methodHandle& method, TRAPS);
   char* generate_code_data(methodHandle m, u4 code_length, TRAPS);
   void verify_exception_handler_table(u4 code_length, char* code_data,
                                       int& min, int& max, TRAPS);
   void verify_local_variable_table(u4 code_length, char* code_data, TRAPS);
 
   VerificationType cp_ref_index_to_type(
-      int index, constantPoolHandle cp, TRAPS) {
+      int index, const constantPoolHandle& cp, TRAPS) {
     return cp_index_to_type(cp->klass_ref_index_at(index), cp, THREAD);
   }
 
@@ -277,10 +277,10 @@
     instanceKlassHandle this_class, Klass* target_class,
     Symbol* field_name, Symbol* field_sig, bool is_method);
 
-  void verify_cp_index(u2 bci, constantPoolHandle cp, int index, TRAPS);
-  void verify_cp_type(u2 bci, int index, constantPoolHandle cp,
+  void verify_cp_index(u2 bci, const constantPoolHandle& cp, int index, TRAPS);
+  void verify_cp_type(u2 bci, int index, const constantPoolHandle& cp,
       unsigned int types, TRAPS);
-  void verify_cp_class_type(u2 bci, int index, constantPoolHandle cp, TRAPS);
+  void verify_cp_class_type(u2 bci, int index, const constantPoolHandle& cp, TRAPS);
 
   u2 verify_stackmap_table(
     u2 stackmap_index, u2 bci, StackMapFrame* current_frame,
@@ -292,7 +292,7 @@
 
   void verify_ldc(
     int opcode, u2 index, StackMapFrame *current_frame,
-    constantPoolHandle cp, u2 bci, TRAPS);
+    const constantPoolHandle& cp, u2 bci, TRAPS);
 
   void verify_switch(
     RawBytecodeStream* bcs, u4 code_length, char* code_data,
@@ -300,12 +300,12 @@
 
   void verify_field_instructions(
     RawBytecodeStream* bcs, StackMapFrame* current_frame,
-    constantPoolHandle cp, bool allow_arrays, TRAPS);
+    const constantPoolHandle& cp, bool allow_arrays, TRAPS);
 
   void verify_invoke_init(
     RawBytecodeStream* bcs, u2 ref_index, VerificationType ref_class_type,
     StackMapFrame* current_frame, u4 code_length, bool in_try_block,
-    bool* this_uninit, constantPoolHandle cp, StackMapTable* stackmap_table,
+    bool* this_uninit, const constantPoolHandle& cp, StackMapTable* stackmap_table,
     TRAPS);
 
   // Used by ends_in_athrow() to push all handlers that contain bci onto the
@@ -322,10 +322,10 @@
   void verify_invoke_instructions(
     RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
     bool in_try_block, bool* this_uninit, VerificationType return_type,
-    constantPoolHandle cp, StackMapTable* stackmap_table, TRAPS);
+    const constantPoolHandle& cp, StackMapTable* stackmap_table, TRAPS);
 
   VerificationType get_newarray_type(u2 index, u2 bci, TRAPS);
-  void verify_anewarray(u2 bci, u2 index, constantPoolHandle cp,
+  void verify_anewarray(u2 bci, u2 index, const constantPoolHandle& cp,
       StackMapFrame* current_frame, TRAPS);
   void verify_return_value(
       VerificationType return_type, VerificationType type, u2 offset,
@@ -406,7 +406,7 @@
   int change_sig_to_verificationType(
     SignatureStream* sig_type, VerificationType* inference_type, TRAPS);
 
-  VerificationType cp_index_to_type(int index, constantPoolHandle cp, TRAPS) {
+  VerificationType cp_index_to_type(int index, const constantPoolHandle& cp, TRAPS) {
     return VerificationType::reference_type(cp->klass_name_at(index));
   }
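
The verifier changes above switch handle parameters (methodHandle, constantPoolHandle) from pass-by-value to pass-by-const-reference. A minimal sketch of why that matters, assuming a toy methodHandle rather than the real one: a by-value parameter copy-constructs a fresh handle on every call, while a const reference binds to the caller's handle with no copy.

    // Illustrative sketch only, not HotSpot source.
    #include <cstdio>

    struct methodHandle {
      methodHandle()                    { std::puts("handle constructed"); }
      methodHandle(const methodHandle&) { std::puts("handle copied"); }
    };

    static void verify_by_value(methodHandle m)       { (void)m; } // old signature style
    static void verify_by_cref(const methodHandle& m) { (void)m; } // new signature style

    int main() {
      methodHandle m;          // prints "handle constructed"
      verify_by_value(m);      // prints "handle copied"
      verify_by_cref(m);       // no copy at all
      return 0;
    }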
 
--- a/hotspot/src/share/vm/classfile/vmSymbols.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/vmSymbols.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -417,7 +417,7 @@
   }
 }
 
-bool vmIntrinsics::is_disabled_by_flags(methodHandle method, methodHandle compilation_context) {
+bool vmIntrinsics::is_disabled_by_flags(const methodHandle& method, const methodHandle& compilation_context) {
   vmIntrinsics::ID id = method->intrinsic_id();
   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
 
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -938,24 +938,25 @@
   do_intrinsic(_updateByteBufferAdler32,  java_util_zip_Adler32,  updateByteBuffer_A_name,  updateByteBuffer_signature,  F_SN) \
    do_name(     updateByteBuffer_A_name,                          "updateByteBuffer")                                   \
                                                                                                                         \
-  /* support for sun.misc.Unsafe */                                                                                     \
-  do_class(sun_misc_Unsafe,               "sun/misc/Unsafe")                                                            \
+  /* support for Unsafe */                                                                                              \
+  do_class(sun_misc_Unsafe,                        "sun/misc/Unsafe")                                                   \
+  do_class(jdk_internal_misc_Unsafe,               "jdk/internal/misc/Unsafe")                                          \
                                                                                                                         \
-  do_intrinsic(_allocateInstance,         sun_misc_Unsafe,        allocateInstance_name, allocateInstance_signature, F_RN) \
-   do_name(     allocateInstance_name,                           "allocateInstance")                                    \
-   do_signature(allocateInstance_signature,   "(Ljava/lang/Class;)Ljava/lang/Object;")                                  \
-  do_intrinsic(_copyMemory,               sun_misc_Unsafe,        copyMemory_name, copyMemory_signature,         F_RN)  \
-   do_name(     copyMemory_name,                                 "copyMemory")                                          \
-   do_signature(copyMemory_signature,         "(Ljava/lang/Object;JLjava/lang/Object;JJ)V")                             \
-  do_intrinsic(_loadFence,                sun_misc_Unsafe,        loadFence_name, loadFence_signature,           F_RN)  \
-   do_name(     loadFence_name,                                  "loadFence")                                           \
-   do_alias(    loadFence_signature,                              void_method_signature)                                \
-  do_intrinsic(_storeFence,               sun_misc_Unsafe,        storeFence_name, storeFence_signature,         F_RN)  \
-   do_name(     storeFence_name,                                 "storeFence")                                          \
-   do_alias(    storeFence_signature,                             void_method_signature)                                \
-  do_intrinsic(_fullFence,                sun_misc_Unsafe,        fullFence_name, fullFence_signature,           F_RN)  \
-   do_name(     fullFence_name,                                  "fullFence")                                           \
-   do_alias(    fullFence_signature,                              void_method_signature)                                \
+  do_intrinsic(_allocateInstance,         jdk_internal_misc_Unsafe,     allocateInstance_name, allocateInstance_signature, F_RN) \
+   do_name(     allocateInstance_name,                                  "allocateInstance")                                      \
+   do_signature(allocateInstance_signature,                             "(Ljava/lang/Class;)Ljava/lang/Object;")                 \
+  do_intrinsic(_copyMemory,               jdk_internal_misc_Unsafe,     copyMemory_name, copyMemory_signature,         F_RN)     \
+   do_name(     copyMemory_name,                                        "copyMemory")                                            \
+   do_signature(copyMemory_signature,                                   "(Ljava/lang/Object;JLjava/lang/Object;JJ)V")            \
+  do_intrinsic(_loadFence,                jdk_internal_misc_Unsafe,     loadFence_name, loadFence_signature,           F_RN)     \
+   do_name(     loadFence_name,                                         "loadFence")                                             \
+   do_alias(    loadFence_signature,                                    void_method_signature)                                   \
+  do_intrinsic(_storeFence,               jdk_internal_misc_Unsafe,     storeFence_name, storeFence_signature,         F_RN)     \
+   do_name(     storeFence_name,                                        "storeFence")                                            \
+   do_alias(    storeFence_signature,                                   void_method_signature)                                   \
+  do_intrinsic(_fullFence,                jdk_internal_misc_Unsafe,     fullFence_name, fullFence_signature,           F_RN)     \
+   do_name(     fullFence_name,                                         "fullFence")                                             \
+   do_alias(    fullFence_signature,                                    void_method_signature)                                   \
                                                                                                                         \
   /* Custom branch frequencies profiling support for JSR292 */                                                          \
   do_class(java_lang_invoke_MethodHandleImpl,               "java/lang/invoke/MethodHandleImpl")                        \
@@ -996,24 +997,24 @@
   do_name(getFloat_name,"getFloat")             do_name(putFloat_name,"putFloat")                                       \
   do_name(getDouble_name,"getDouble")           do_name(putDouble_name,"putDouble")                                     \
                                                                                                                         \
-  do_intrinsic(_getObject,                sun_misc_Unsafe,        getObject_name, getObject_signature,           F_RN)  \
-  do_intrinsic(_getBoolean,               sun_misc_Unsafe,        getBoolean_name, getBoolean_signature,         F_RN)  \
-  do_intrinsic(_getByte,                  sun_misc_Unsafe,        getByte_name, getByte_signature,               F_RN)  \
-  do_intrinsic(_getShort,                 sun_misc_Unsafe,        getShort_name, getShort_signature,             F_RN)  \
-  do_intrinsic(_getChar,                  sun_misc_Unsafe,        getChar_name, getChar_signature,               F_RN)  \
-  do_intrinsic(_getInt,                   sun_misc_Unsafe,        getInt_name, getInt_signature,                 F_RN)  \
-  do_intrinsic(_getLong,                  sun_misc_Unsafe,        getLong_name, getLong_signature,               F_RN)  \
-  do_intrinsic(_getFloat,                 sun_misc_Unsafe,        getFloat_name, getFloat_signature,             F_RN)  \
-  do_intrinsic(_getDouble,                sun_misc_Unsafe,        getDouble_name, getDouble_signature,           F_RN)  \
-  do_intrinsic(_putObject,                sun_misc_Unsafe,        putObject_name, putObject_signature,           F_RN)  \
-  do_intrinsic(_putBoolean,               sun_misc_Unsafe,        putBoolean_name, putBoolean_signature,         F_RN)  \
-  do_intrinsic(_putByte,                  sun_misc_Unsafe,        putByte_name, putByte_signature,               F_RN)  \
-  do_intrinsic(_putShort,                 sun_misc_Unsafe,        putShort_name, putShort_signature,             F_RN)  \
-  do_intrinsic(_putChar,                  sun_misc_Unsafe,        putChar_name, putChar_signature,               F_RN)  \
-  do_intrinsic(_putInt,                   sun_misc_Unsafe,        putInt_name, putInt_signature,                 F_RN)  \
-  do_intrinsic(_putLong,                  sun_misc_Unsafe,        putLong_name, putLong_signature,               F_RN)  \
-  do_intrinsic(_putFloat,                 sun_misc_Unsafe,        putFloat_name, putFloat_signature,             F_RN)  \
-  do_intrinsic(_putDouble,                sun_misc_Unsafe,        putDouble_name, putDouble_signature,           F_RN)  \
+  do_intrinsic(_getObject,          jdk_internal_misc_Unsafe,     getObject_name, getObject_signature,           F_RN)  \
+  do_intrinsic(_getBoolean,         jdk_internal_misc_Unsafe,     getBoolean_name, getBoolean_signature,         F_RN)  \
+  do_intrinsic(_getByte,            jdk_internal_misc_Unsafe,     getByte_name, getByte_signature,               F_RN)  \
+  do_intrinsic(_getShort,           jdk_internal_misc_Unsafe,     getShort_name, getShort_signature,             F_RN)  \
+  do_intrinsic(_getChar,            jdk_internal_misc_Unsafe,     getChar_name, getChar_signature,               F_RN)  \
+  do_intrinsic(_getInt,             jdk_internal_misc_Unsafe,     getInt_name, getInt_signature,                 F_RN)  \
+  do_intrinsic(_getLong,            jdk_internal_misc_Unsafe,     getLong_name, getLong_signature,               F_RN)  \
+  do_intrinsic(_getFloat,           jdk_internal_misc_Unsafe,     getFloat_name, getFloat_signature,             F_RN)  \
+  do_intrinsic(_getDouble,          jdk_internal_misc_Unsafe,     getDouble_name, getDouble_signature,           F_RN)  \
+  do_intrinsic(_putObject,          jdk_internal_misc_Unsafe,     putObject_name, putObject_signature,           F_RN)  \
+  do_intrinsic(_putBoolean,         jdk_internal_misc_Unsafe,     putBoolean_name, putBoolean_signature,         F_RN)  \
+  do_intrinsic(_putByte,            jdk_internal_misc_Unsafe,     putByte_name, putByte_signature,               F_RN)  \
+  do_intrinsic(_putShort,           jdk_internal_misc_Unsafe,     putShort_name, putShort_signature,             F_RN)  \
+  do_intrinsic(_putChar,            jdk_internal_misc_Unsafe,     putChar_name, putChar_signature,               F_RN)  \
+  do_intrinsic(_putInt,             jdk_internal_misc_Unsafe,     putInt_name, putInt_signature,                 F_RN)  \
+  do_intrinsic(_putLong,            jdk_internal_misc_Unsafe,     putLong_name, putLong_signature,               F_RN)  \
+  do_intrinsic(_putFloat,           jdk_internal_misc_Unsafe,     putFloat_name, putFloat_signature,             F_RN)  \
+  do_intrinsic(_putDouble,          jdk_internal_misc_Unsafe,     putDouble_name, putDouble_signature,           F_RN)  \
                                                                                                                         \
   do_name(getObjectVolatile_name,"getObjectVolatile")   do_name(putObjectVolatile_name,"putObjectVolatile")             \
   do_name(getBooleanVolatile_name,"getBooleanVolatile") do_name(putBooleanVolatile_name,"putBooleanVolatile")           \
@@ -1025,38 +1026,38 @@
   do_name(getFloatVolatile_name,"getFloatVolatile")     do_name(putFloatVolatile_name,"putFloatVolatile")               \
   do_name(getDoubleVolatile_name,"getDoubleVolatile")   do_name(putDoubleVolatile_name,"putDoubleVolatile")             \
                                                                                                                         \
-  do_intrinsic(_getObjectVolatile,        sun_misc_Unsafe,        getObjectVolatile_name, getObject_signature,   F_RN)  \
-  do_intrinsic(_getBooleanVolatile,       sun_misc_Unsafe,        getBooleanVolatile_name, getBoolean_signature, F_RN)  \
-  do_intrinsic(_getByteVolatile,          sun_misc_Unsafe,        getByteVolatile_name, getByte_signature,       F_RN)  \
-  do_intrinsic(_getShortVolatile,         sun_misc_Unsafe,        getShortVolatile_name, getShort_signature,     F_RN)  \
-  do_intrinsic(_getCharVolatile,          sun_misc_Unsafe,        getCharVolatile_name, getChar_signature,       F_RN)  \
-  do_intrinsic(_getIntVolatile,           sun_misc_Unsafe,        getIntVolatile_name, getInt_signature,         F_RN)  \
-  do_intrinsic(_getLongVolatile,          sun_misc_Unsafe,        getLongVolatile_name, getLong_signature,       F_RN)  \
-  do_intrinsic(_getFloatVolatile,         sun_misc_Unsafe,        getFloatVolatile_name, getFloat_signature,     F_RN)  \
-  do_intrinsic(_getDoubleVolatile,        sun_misc_Unsafe,        getDoubleVolatile_name, getDouble_signature,   F_RN)  \
-  do_intrinsic(_putObjectVolatile,        sun_misc_Unsafe,        putObjectVolatile_name, putObject_signature,   F_RN)  \
-  do_intrinsic(_putBooleanVolatile,       sun_misc_Unsafe,        putBooleanVolatile_name, putBoolean_signature, F_RN)  \
-  do_intrinsic(_putByteVolatile,          sun_misc_Unsafe,        putByteVolatile_name, putByte_signature,       F_RN)  \
-  do_intrinsic(_putShortVolatile,         sun_misc_Unsafe,        putShortVolatile_name, putShort_signature,     F_RN)  \
-  do_intrinsic(_putCharVolatile,          sun_misc_Unsafe,        putCharVolatile_name, putChar_signature,       F_RN)  \
-  do_intrinsic(_putIntVolatile,           sun_misc_Unsafe,        putIntVolatile_name, putInt_signature,         F_RN)  \
-  do_intrinsic(_putLongVolatile,          sun_misc_Unsafe,        putLongVolatile_name, putLong_signature,       F_RN)  \
-  do_intrinsic(_putFloatVolatile,         sun_misc_Unsafe,        putFloatVolatile_name, putFloat_signature,     F_RN)  \
-  do_intrinsic(_putDoubleVolatile,        sun_misc_Unsafe,        putDoubleVolatile_name, putDouble_signature,   F_RN)  \
+  do_intrinsic(_getObjectVolatile,        jdk_internal_misc_Unsafe,     getObjectVolatile_name, getObject_signature,   F_RN)  \
+  do_intrinsic(_getBooleanVolatile,       jdk_internal_misc_Unsafe,     getBooleanVolatile_name, getBoolean_signature, F_RN)  \
+  do_intrinsic(_getByteVolatile,          jdk_internal_misc_Unsafe,     getByteVolatile_name, getByte_signature,       F_RN)  \
+  do_intrinsic(_getShortVolatile,         jdk_internal_misc_Unsafe,     getShortVolatile_name, getShort_signature,     F_RN)  \
+  do_intrinsic(_getCharVolatile,          jdk_internal_misc_Unsafe,     getCharVolatile_name, getChar_signature,       F_RN)  \
+  do_intrinsic(_getIntVolatile,           jdk_internal_misc_Unsafe,     getIntVolatile_name, getInt_signature,         F_RN)  \
+  do_intrinsic(_getLongVolatile,          jdk_internal_misc_Unsafe,     getLongVolatile_name, getLong_signature,       F_RN)  \
+  do_intrinsic(_getFloatVolatile,         jdk_internal_misc_Unsafe,     getFloatVolatile_name, getFloat_signature,     F_RN)  \
+  do_intrinsic(_getDoubleVolatile,        jdk_internal_misc_Unsafe,     getDoubleVolatile_name, getDouble_signature,   F_RN)  \
+  do_intrinsic(_putObjectVolatile,        jdk_internal_misc_Unsafe,     putObjectVolatile_name, putObject_signature,   F_RN)  \
+  do_intrinsic(_putBooleanVolatile,       jdk_internal_misc_Unsafe,     putBooleanVolatile_name, putBoolean_signature, F_RN)  \
+  do_intrinsic(_putByteVolatile,          jdk_internal_misc_Unsafe,     putByteVolatile_name, putByte_signature,       F_RN)  \
+  do_intrinsic(_putShortVolatile,         jdk_internal_misc_Unsafe,     putShortVolatile_name, putShort_signature,     F_RN)  \
+  do_intrinsic(_putCharVolatile,          jdk_internal_misc_Unsafe,     putCharVolatile_name, putChar_signature,       F_RN)  \
+  do_intrinsic(_putIntVolatile,           jdk_internal_misc_Unsafe,     putIntVolatile_name, putInt_signature,         F_RN)  \
+  do_intrinsic(_putLongVolatile,          jdk_internal_misc_Unsafe,     putLongVolatile_name, putLong_signature,       F_RN)  \
+  do_intrinsic(_putFloatVolatile,         jdk_internal_misc_Unsafe,     putFloatVolatile_name, putFloat_signature,     F_RN)  \
+  do_intrinsic(_putDoubleVolatile,        jdk_internal_misc_Unsafe,     putDoubleVolatile_name, putDouble_signature,   F_RN)  \
                                                                                                                         \
   do_name(getShortUnaligned_name,"getShortUnaligned")     do_name(putShortUnaligned_name,"putShortUnaligned")           \
   do_name(getCharUnaligned_name,"getCharUnaligned")       do_name(putCharUnaligned_name,"putCharUnaligned")             \
   do_name(getIntUnaligned_name,"getIntUnaligned")         do_name(putIntUnaligned_name,"putIntUnaligned")               \
   do_name(getLongUnaligned_name,"getLongUnaligned")       do_name(putLongUnaligned_name,"putLongUnaligned")             \
                                                                                                                         \
-  do_intrinsic(_getShortUnaligned,         sun_misc_Unsafe,        getShortUnaligned_name, getShort_signature,     F_R)  \
-  do_intrinsic(_getCharUnaligned,          sun_misc_Unsafe,        getCharUnaligned_name, getChar_signature,       F_R)  \
-  do_intrinsic(_getIntUnaligned,           sun_misc_Unsafe,        getIntUnaligned_name, getInt_signature,         F_R)  \
-  do_intrinsic(_getLongUnaligned,          sun_misc_Unsafe,        getLongUnaligned_name, getLong_signature,       F_R)  \
-  do_intrinsic(_putShortUnaligned,         sun_misc_Unsafe,        putShortUnaligned_name, putShort_signature,     F_R)  \
-  do_intrinsic(_putCharUnaligned,          sun_misc_Unsafe,        putCharUnaligned_name, putChar_signature,       F_R)  \
-  do_intrinsic(_putIntUnaligned,           sun_misc_Unsafe,        putIntUnaligned_name, putInt_signature,         F_R)  \
-  do_intrinsic(_putLongUnaligned,          sun_misc_Unsafe,        putLongUnaligned_name, putLong_signature,       F_R)  \
+  do_intrinsic(_getShortUnaligned,         jdk_internal_misc_Unsafe,    getShortUnaligned_name, getShort_signature,     F_R)  \
+  do_intrinsic(_getCharUnaligned,          jdk_internal_misc_Unsafe,    getCharUnaligned_name, getChar_signature,       F_R)  \
+  do_intrinsic(_getIntUnaligned,           jdk_internal_misc_Unsafe,    getIntUnaligned_name, getInt_signature,         F_R)  \
+  do_intrinsic(_getLongUnaligned,          jdk_internal_misc_Unsafe,    getLongUnaligned_name, getLong_signature,       F_R)  \
+  do_intrinsic(_putShortUnaligned,         jdk_internal_misc_Unsafe,    putShortUnaligned_name, putShort_signature,     F_R)  \
+  do_intrinsic(_putCharUnaligned,          jdk_internal_misc_Unsafe,    putCharUnaligned_name, putChar_signature,       F_R)  \
+  do_intrinsic(_putIntUnaligned,           jdk_internal_misc_Unsafe,    putIntUnaligned_name, putInt_signature,         F_R)  \
+  do_intrinsic(_putLongUnaligned,          jdk_internal_misc_Unsafe,    putLongUnaligned_name, putLong_signature,       F_R)  \
                                                                                                                         \
   /* %%% these are redundant except perhaps for getAddress, but Unsafe has native methods for them */                   \
   do_signature(getByte_raw_signature,     "(J)B")                                                                       \
@@ -1078,66 +1079,67 @@
    do_name(    getAddress_name,           "getAddress")                                                                 \
    do_name(    putAddress_name,           "putAddress")                                                                 \
                                                                                                                         \
-  do_intrinsic(_getByte_raw,              sun_misc_Unsafe,        getByte_name, getByte_raw_signature,           F_RN)  \
-  do_intrinsic(_getShort_raw,             sun_misc_Unsafe,        getShort_name, getShort_raw_signature,         F_RN)  \
-  do_intrinsic(_getChar_raw,              sun_misc_Unsafe,        getChar_name, getChar_raw_signature,           F_RN)  \
-  do_intrinsic(_getInt_raw,               sun_misc_Unsafe,        getInt_name, long_int_signature,               F_RN)  \
-  do_intrinsic(_getLong_raw,              sun_misc_Unsafe,        getLong_name, getLong_raw_signature,           F_RN)  \
-  do_intrinsic(_getFloat_raw,             sun_misc_Unsafe,        getFloat_name, getFloat_raw_signature,         F_RN)  \
-  do_intrinsic(_getDouble_raw,            sun_misc_Unsafe,        getDouble_name, getDouble_raw_signature,       F_RN)  \
-  do_intrinsic(_getAddress_raw,           sun_misc_Unsafe,        getAddress_name, getAddress_raw_signature,     F_RN)  \
-  do_intrinsic(_putByte_raw,              sun_misc_Unsafe,        putByte_name, putByte_raw_signature,           F_RN)  \
-  do_intrinsic(_putShort_raw,             sun_misc_Unsafe,        putShort_name, putShort_raw_signature,         F_RN)  \
-  do_intrinsic(_putChar_raw,              sun_misc_Unsafe,        putChar_name, putChar_raw_signature,           F_RN)  \
-  do_intrinsic(_putInt_raw,               sun_misc_Unsafe,        putInt_name, putInt_raw_signature,             F_RN)  \
-  do_intrinsic(_putLong_raw,              sun_misc_Unsafe,        putLong_name, putLong_raw_signature,           F_RN)  \
-  do_intrinsic(_putFloat_raw,             sun_misc_Unsafe,        putFloat_name, putFloat_raw_signature,         F_RN)  \
-  do_intrinsic(_putDouble_raw,            sun_misc_Unsafe,        putDouble_name, putDouble_raw_signature,       F_RN)  \
-  do_intrinsic(_putAddress_raw,           sun_misc_Unsafe,        putAddress_name, putAddress_raw_signature,     F_RN)  \
+  do_intrinsic(_getByte_raw,              jdk_internal_misc_Unsafe,     getByte_name, getByte_raw_signature,           F_R)  \
+  do_intrinsic(_getShort_raw,             jdk_internal_misc_Unsafe,     getShort_name, getShort_raw_signature,         F_R)  \
+  do_intrinsic(_getChar_raw,              jdk_internal_misc_Unsafe,     getChar_name, getChar_raw_signature,           F_R)  \
+  do_intrinsic(_getInt_raw,               jdk_internal_misc_Unsafe,     getInt_name, long_int_signature,               F_R)  \
+  do_intrinsic(_getLong_raw,              jdk_internal_misc_Unsafe,     getLong_name, getLong_raw_signature,           F_R)  \
+  do_intrinsic(_getFloat_raw,             jdk_internal_misc_Unsafe,     getFloat_name, getFloat_raw_signature,         F_R)  \
+  do_intrinsic(_getDouble_raw,            jdk_internal_misc_Unsafe,     getDouble_name, getDouble_raw_signature,       F_R)  \
+  do_intrinsic(_getAddress_raw,           jdk_internal_misc_Unsafe,     getAddress_name, getAddress_raw_signature,     F_R)  \
+  do_intrinsic(_putByte_raw,              jdk_internal_misc_Unsafe,     putByte_name, putByte_raw_signature,           F_R)  \
+  do_intrinsic(_putShort_raw,             jdk_internal_misc_Unsafe,     putShort_name, putShort_raw_signature,         F_R)  \
+  do_intrinsic(_putChar_raw,              jdk_internal_misc_Unsafe,     putChar_name, putChar_raw_signature,           F_R)  \
+  do_intrinsic(_putInt_raw,               jdk_internal_misc_Unsafe,     putInt_name, putInt_raw_signature,             F_R)  \
+  do_intrinsic(_putLong_raw,              jdk_internal_misc_Unsafe,     putLong_name, putLong_raw_signature,           F_R)  \
+  do_intrinsic(_putFloat_raw,             jdk_internal_misc_Unsafe,     putFloat_name, putFloat_raw_signature,         F_R)  \
+  do_intrinsic(_putDouble_raw,            jdk_internal_misc_Unsafe,     putDouble_name, putDouble_raw_signature,       F_R)  \
+  do_intrinsic(_putAddress_raw,           jdk_internal_misc_Unsafe,     putAddress_name, putAddress_raw_signature,     F_R)  \
                                                                                                                         \
-  do_intrinsic(_compareAndSwapObject,     sun_misc_Unsafe,        compareAndSwapObject_name, compareAndSwapObject_signature, F_RN) \
-   do_name(     compareAndSwapObject_name,                       "compareAndSwapObject")                                \
-   do_signature(compareAndSwapObject_signature,  "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z")          \
-  do_intrinsic(_compareAndSwapLong,       sun_misc_Unsafe,        compareAndSwapLong_name, compareAndSwapLong_signature, F_RN) \
-   do_name(     compareAndSwapLong_name,                         "compareAndSwapLong")                                  \
-   do_signature(compareAndSwapLong_signature,                    "(Ljava/lang/Object;JJJ)Z")                            \
-  do_intrinsic(_compareAndSwapInt,        sun_misc_Unsafe,        compareAndSwapInt_name, compareAndSwapInt_signature, F_RN) \
-   do_name(     compareAndSwapInt_name,                          "compareAndSwapInt")                                   \
-   do_signature(compareAndSwapInt_signature,                     "(Ljava/lang/Object;JII)Z")                            \
-  do_intrinsic(_putOrderedObject,         sun_misc_Unsafe,        putOrderedObject_name, putOrderedObject_signature, F_RN) \
-   do_name(     putOrderedObject_name,                           "putOrderedObject")                                    \
-   do_alias(    putOrderedObject_signature,                     /*(LObject;JLObject;)V*/ putObject_signature)           \
-  do_intrinsic(_putOrderedLong,           sun_misc_Unsafe,        putOrderedLong_name, putOrderedLong_signature, F_RN)  \
-   do_name(     putOrderedLong_name,                             "putOrderedLong")                                      \
-   do_alias(    putOrderedLong_signature,                       /*(Ljava/lang/Object;JJ)V*/ putLong_signature)          \
-  do_intrinsic(_putOrderedInt,            sun_misc_Unsafe,        putOrderedInt_name, putOrderedInt_signature,   F_RN)  \
-   do_name(     putOrderedInt_name,                              "putOrderedInt")                                       \
-   do_alias(    putOrderedInt_signature,                        /*(Ljava/lang/Object;JI)V*/ putInt_signature)           \
+  do_intrinsic(_compareAndSwapObject,     jdk_internal_misc_Unsafe,     compareAndSwapObject_name, compareAndSwapObject_signature, F_R) \
+   do_name(     compareAndSwapObject_name,                              "compareAndSwapObject")                                \
+   do_signature(compareAndSwapObject_signature,                         "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z")          \
+  do_intrinsic(_compareAndSwapLong,       jdk_internal_misc_Unsafe,     compareAndSwapLong_name, compareAndSwapLong_signature, F_R) \
+   do_name(     compareAndSwapLong_name,                                "compareAndSwapLong")                                  \
+   do_signature(compareAndSwapLong_signature,                           "(Ljava/lang/Object;JJJ)Z")                            \
+  do_intrinsic(_compareAndSwapInt,        jdk_internal_misc_Unsafe,     compareAndSwapInt_name, compareAndSwapInt_signature, F_R) \
+   do_name(     compareAndSwapInt_name,                                 "compareAndSwapInt")                                   \
+   do_signature(compareAndSwapInt_signature,                            "(Ljava/lang/Object;JII)Z")                            \
+  do_intrinsic(_putOrderedObject,         jdk_internal_misc_Unsafe,     putOrderedObject_name, putOrderedObject_signature, F_R) \
+   do_name(     putOrderedObject_name,                                  "putOrderedObject")                                    \
+   do_alias(    putOrderedObject_signature,                             /*(LObject;JLObject;)V*/ putObject_signature)           \
+  do_intrinsic(_putOrderedLong,           jdk_internal_misc_Unsafe,     putOrderedLong_name, putOrderedLong_signature, F_R)  \
+   do_name(     putOrderedLong_name,                                    "putOrderedLong")                                      \
+   do_alias(    putOrderedLong_signature,                               /*(Ljava/lang/Object;JJ)V*/ putLong_signature)          \
+  do_intrinsic(_putOrderedInt,            jdk_internal_misc_Unsafe,     putOrderedInt_name, putOrderedInt_signature,   F_R)  \
+   do_name(     putOrderedInt_name,                                     "putOrderedInt")                                       \
+   do_alias(    putOrderedInt_signature,                                 /*(Ljava/lang/Object;JI)V*/ putInt_signature)           \
                                                                                                                         \
-  do_intrinsic(_getAndAddInt,             sun_misc_Unsafe,        getAndAddInt_name, getAndAddInt_signature, F_R)       \
-   do_name(     getAndAddInt_name,                                "getAndAddInt")                                       \
-   do_signature(getAndAddInt_signature,                           "(Ljava/lang/Object;JI)I" )                           \
-  do_intrinsic(_getAndAddLong,            sun_misc_Unsafe,        getAndAddLong_name, getAndAddLong_signature, F_R)     \
-   do_name(     getAndAddLong_name,                               "getAndAddLong")                                      \
-   do_signature(getAndAddLong_signature,                          "(Ljava/lang/Object;JJ)J" )                           \
-  do_intrinsic(_getAndSetInt,             sun_misc_Unsafe,        getAndSetInt_name, getAndSetInt_signature, F_R)       \
-   do_name(     getAndSetInt_name,                                "getAndSetInt")                                       \
-   do_alias(    getAndSetInt_signature,                         /*"(Ljava/lang/Object;JI)I"*/ getAndAddInt_signature)   \
-  do_intrinsic(_getAndSetLong,            sun_misc_Unsafe,        getAndSetLong_name, getAndSetLong_signature, F_R)     \
-   do_name(     getAndSetLong_name,                               "getAndSetLong")                                      \
-   do_alias(    getAndSetLong_signature,                        /*"(Ljava/lang/Object;JJ)J"*/ getAndAddLong_signature)  \
-  do_intrinsic(_getAndSetObject,          sun_misc_Unsafe,        getAndSetObject_name, getAndSetObject_signature,  F_R)\
-   do_name(     getAndSetObject_name,                             "getAndSetObject")                                    \
-   do_signature(getAndSetObject_signature,                        "(Ljava/lang/Object;JLjava/lang/Object;)Ljava/lang/Object;" ) \
-                                                                                                                        \
-   /* (2) Bytecode intrinsics                                                                        */                 \
-                                                                                                                        \
-  do_intrinsic(_park,                     sun_misc_Unsafe,        park_name, park_signature,                     F_RN)  \
-   do_name(     park_name,                                       "park")                                                \
-   do_signature(park_signature,                                  "(ZJ)V")                                               \
-  do_intrinsic(_unpark,                   sun_misc_Unsafe,        unpark_name, unpark_signature,                 F_RN)  \
-   do_name(     unpark_name,                                     "unpark")                                              \
-   do_alias(    unpark_signature,                               /*(LObject;)V*/ object_void_signature)                  \
+  do_intrinsic(_getAndAddInt,             jdk_internal_misc_Unsafe,     getAndAddInt_name, getAndAddInt_signature, F_R)       \
+   do_name(     getAndAddInt_name,                                      "getAndAddInt")                                       \
+   do_signature(getAndAddInt_signature,                                 "(Ljava/lang/Object;JI)I" )                           \
+  do_intrinsic(_getAndAddLong,            jdk_internal_misc_Unsafe,     getAndAddLong_name, getAndAddLong_signature, F_R)     \
+   do_name(     getAndAddLong_name,                                     "getAndAddLong")                                      \
+   do_signature(getAndAddLong_signature,                                "(Ljava/lang/Object;JJ)J" )                           \
+  do_intrinsic(_getAndSetInt,             jdk_internal_misc_Unsafe,     getAndSetInt_name, getAndSetInt_signature, F_R)       \
+   do_name(     getAndSetInt_name,                                      "getAndSetInt")                                       \
+   do_alias(    getAndSetInt_signature,                                 /*"(Ljava/lang/Object;JI)I"*/ getAndAddInt_signature)   \
+  do_intrinsic(_getAndSetLong,            jdk_internal_misc_Unsafe,     getAndSetLong_name, getAndSetLong_signature, F_R)     \
+   do_name(     getAndSetLong_name,                                     "getAndSetLong")                                      \
+   do_alias(    getAndSetLong_signature,                                /*"(Ljava/lang/Object;JJ)J"*/ getAndAddLong_signature)  \
+  do_intrinsic(_getAndSetObject,          jdk_internal_misc_Unsafe,     getAndSetObject_name, getAndSetObject_signature,  F_R)\
+   do_name(     getAndSetObject_name,                                   "getAndSetObject")                                    \
+   do_signature(getAndSetObject_signature,                              "(Ljava/lang/Object;JLjava/lang/Object;)Ljava/lang/Object;" ) \
+                                                                                                                               \
+   /* (2) Bytecode intrinsics                                                                        */                        \
+                                                                                                                               \
+  do_intrinsic(_park,                     jdk_internal_misc_Unsafe,     park_name, park_signature,                     F_R)    \
+   do_name(     park_name,                                              "park")                                                \
+   do_signature(park_signature,                                         "(ZJ)V")                                               \
+  do_intrinsic(_unpark,                   jdk_internal_misc_Unsafe,     unpark_name, unpark_signature,                 F_R)    \
+   do_name(     unpark_name,                                            "unpark")                                              \
+   do_alias(    unpark_signature,                                       /*(LObject;)V*/ object_void_signature)                 \
+                                                                                                                               \
   do_intrinsic(_StringBuilder_void,   java_lang_StringBuilder, object_initializer_name, void_method_signature,     F_R)   \
   do_intrinsic(_StringBuilder_int,    java_lang_StringBuilder, object_initializer_name, int_void_signature,        F_R)   \
   do_intrinsic(_StringBuilder_String, java_lang_StringBuilder, object_initializer_name, string_void_signature,     F_R)   \
@@ -1402,7 +1404,7 @@
 
   // Returns true if a compiler intrinsic is disabled by command-line flags
   // and false otherwise.
-  static bool is_disabled_by_flags(methodHandle method, methodHandle compilation_context);
+  static bool is_disabled_by_flags(const methodHandle& method, const methodHandle& compilation_context);
 };
 
 #endif // SHARE_VM_CLASSFILE_VMSYMBOLS_HPP
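
The do_class/do_intrinsic/do_name/do_signature entries above form an X-macro list: one list is expanded several times to generate the intrinsic ID enum and the parallel name tables, which is why a holder rename (sun_misc_Unsafe to jdk_internal_misc_Unsafe) is a one-column edit per entry. A minimal standalone sketch of the pattern, with hypothetical names (MY_INTRINSICS_DO, holder_name) standing in for the real vmSymbols machinery:

    // Illustrative sketch only, not HotSpot source.
    #include <cstdio>

    #define MY_INTRINSICS_DO(f)                        \
      f(_allocateInstance, "jdk/internal/misc/Unsafe") \
      f(_park,             "jdk/internal/misc/Unsafe")

    // Expansion 1: an enum of intrinsic IDs.
    #define AS_ENUM(id, holder) id,
    enum IntrinsicId { MY_INTRINSICS_DO(AS_ENUM) _id_limit };
    #undef AS_ENUM

    // Expansion 2: a parallel table of holder-class names.
    #define AS_NAME(id, holder) holder,
    static const char* holder_name[] = { MY_INTRINSICS_DO(AS_NAME) };
    #undef AS_NAME

    int main() {
      std::printf("%d intrinsics, holder of first: %s\n",
                  (int)_id_limit, holder_name[_allocateInstance]);
      return 0;
    }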
--- a/hotspot/src/share/vm/code/compiledIC.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/code/compiledIC.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -462,7 +462,7 @@
 // is_optimized: Compiler has generated an optimized call (i.e., no inline
 // cache) static_bound: The call can be static bound (i.e, no need to use
 // inline cache)
-void CompiledIC::compute_monomorphic_entry(methodHandle method,
+void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                            KlassHandle receiver_klass,
                                            bool is_optimized,
                                            bool static_bound,
@@ -594,7 +594,7 @@
 
 // Compute settings for a CompiledStaticCall. Since we might have to set
 // the stub when calling to the interpreter, we need to return arguments.
-void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
+void CompiledStaticCall::compute_entry(const methodHandle& m, StaticCallInfo& info) {
   nmethod* m_code = m->code();
   info._callee = m;
   if (m_code != NULL && m_code->is_in_use()) {
--- a/hotspot/src/share/vm/code/compiledIC.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/code/compiledIC.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,7 +222,7 @@
   // allocation in the code cache fails.
   bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
 
-  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
+  static void compute_monomorphic_entry(const methodHandle& method, KlassHandle receiver_klass,
                                         bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
 
   // Location
@@ -324,7 +324,7 @@
   void set(const StaticCallInfo& info);
 
   // Compute entry point given a method
-  static void compute_entry(methodHandle m, StaticCallInfo& info);
+  static void compute_entry(const methodHandle& m, StaticCallInfo& info);
 
   // Stub support
   address find_stub();
--- a/hotspot/src/share/vm/code/debugInfoRec.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/code/debugInfoRec.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -324,7 +324,7 @@
 // must call add_safepoint before: it sets PcDesc and this routine uses
 // the last PcDesc set
 void DebugInformationRecorder::describe_scope(int         pc_offset,
-                                              methodHandle methodH,
+                                              const methodHandle& methodH,
                                               ciMethod*   method,
                                               int         bci,
                                               bool        reexecute,
--- a/hotspot/src/share/vm/code/debugInfoRec.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/code/debugInfoRec.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,7 +98,7 @@
   // by add_non_safepoint, and the locals, expressions, and monitors
   // must all be null.
   void describe_scope(int         pc_offset,
-                      methodHandle methodH,
+                      const methodHandle& methodH,
                       ciMethod*   method,
                       int         bci,
                       bool        reexecute,
--- a/hotspot/src/share/vm/code/dependencies.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/code/dependencies.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -151,7 +151,7 @@
 }
 
 void Dependencies::assert_leaf_type(Klass* ctxk) {
-  if (ctxk->oop_is_array()) {
+  if (ctxk->is_array_klass()) {
     // As a special case, support this assertion on an array type,
     // which reduces to an assertion on its element type.
     // Note that this cannot be done with assertions that
@@ -1084,10 +1084,10 @@
       return true;  // Must punt the assertion to true.
     Klass* k = ctxk;
     Method* lm = k->lookup_method(m->name(), m->signature());
-    if (lm == NULL && k->oop_is_instance()) {
+    if (lm == NULL && k->is_instance_klass()) {
       // It might be an interface method
-        lm = ((InstanceKlass*)k)->lookup_method_in_ordered_interfaces(m->name(),
-                                                                m->signature());
+      lm = InstanceKlass::cast(k)->lookup_method_in_ordered_interfaces(m->name(),
+                                                                 m->signature());
     }
     if (lm == m)
       // Method m is inherited into ctxk.
@@ -1135,7 +1135,7 @@
   bool is_witness(Klass* k) {
     if (doing_subtype_search()) {
       return Dependencies::is_concrete_klass(k);
-    } else if (!k->oop_is_instance()) {
+    } else if (!k->is_instance_klass()) {
       return false; // no methods to find in an array type
     } else {
       // Search class hierarchy first.
@@ -1840,20 +1840,20 @@
     Klass* k = str.klass();
     switch (str.change_type()) {
     case Change_new_type:
-      tty->print_cr("  dependee = %s", InstanceKlass::cast(k)->external_name());
+      tty->print_cr("  dependee = %s", k->external_name());
       break;
     case Change_new_sub:
       if (!WizardMode) {
         ++nsup;
       } else {
-        tty->print_cr("  context super = %s", InstanceKlass::cast(k)->external_name());
+        tty->print_cr("  context super = %s", k->external_name());
       }
       break;
     case Change_new_impl:
       if (!WizardMode) {
         ++nint;
       } else {
-        tty->print_cr("  context interface = %s", InstanceKlass::cast(k)->external_name());
+        tty->print_cr("  context interface = %s", k->external_name());
       }
       break;
     }
@@ -1885,7 +1885,7 @@
   case Change_new_sub:
     // 6598190: brackets workaround Sun Studio C++ compiler bug 6629277
     {
-      _klass = InstanceKlass::cast(_klass)->super();
+      _klass = _klass->super();
       if (_klass != NULL) {
         return true;
       }
@@ -1931,7 +1931,7 @@
 }
 
 bool KlassDepChange::involves_context(Klass* k) {
-  if (k == NULL || !k->oop_is_instance()) {
+  if (k == NULL || !k->is_instance_klass()) {
     return false;
   }
   InstanceKlass* ik = InstanceKlass::cast(k);
--- a/hotspot/src/share/vm/code/dependencies.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/code/dependencies.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -351,7 +351,7 @@
 #if INCLUDE_JVMCI
  private:
   static void check_ctxk(Klass* ctxk) {
-    assert(ctxk->oop_is_instance(), "java types only");
+    assert(ctxk->is_instance_klass(), "java types only");
   }
   static void check_ctxk_abstract(Klass* ctxk) {
     check_ctxk(ctxk);
--- a/hotspot/src/share/vm/code/nmethod.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -558,7 +558,7 @@
 #endif
 }
 
-nmethod* nmethod::new_native_nmethod(methodHandle method,
+nmethod* nmethod::new_native_nmethod(const methodHandle& method,
   int compile_id,
   CodeBuffer *code_buffer,
   int vep_offset,
@@ -596,7 +596,7 @@
   return nm;
 }
 
-nmethod* nmethod::new_nmethod(methodHandle method,
+nmethod* nmethod::new_nmethod(const methodHandle& method,
   int compile_id,
   int entry_bci,
   CodeOffsets* offsets,
@@ -1628,7 +1628,11 @@
         // During GC the is_alive closure is non-NULL, and is used to
         // determine liveness of dependees that need to be updated.
         if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
-          InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
+          // The GC defers deletion of this entry, since there might be multiple threads
+          // iterating over the _dependencies graph. Other call paths are single-threaded
+          // and may delete it immediately.
+          bool delete_immediately = is_alive == NULL;
+          InstanceKlass::cast(klass)->remove_dependent_nmethod(this, delete_immediately);
         }
       }
     }
@@ -3017,7 +3021,7 @@
     deps.print_dependency();
     Klass* ctxk = deps.context_type();
     if (ctxk != NULL) {
-      if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {
+      if (ctxk->is_instance_klass() && InstanceKlass::cast(ctxk)->is_dependent_nmethod(this)) {
         tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
       }
     }
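
The nmethod.cpp hunk above introduces a delete_immediately flag: only single-threaded callers unlink the dependency entry on the spot, while the GC path (is_alive != NULL) defers the deletion because other threads may still be iterating the dependency list. A minimal sketch of that mark-then-purge idea, using stand-in types rather than InstanceKlass's real dependency bookkeeping:

    // Illustrative sketch only, not HotSpot source.
    #include <list>
    #include <cstdio>

    struct DepEntry { int nmethod_id; bool stale; };

    struct DepList {
      std::list<DepEntry> entries;

      void remove(int id, bool delete_immediately) {
        for (auto it = entries.begin(); it != entries.end(); ++it) {
          if (it->nmethod_id != id) continue;
          if (delete_immediately) entries.erase(it); // safe: no concurrent iterators
          else                    it->stale = true;  // GC path: defer, keep iterators valid
          return;
        }
      }

      void purge_stale() {                           // later, single-threaded cleanup
        entries.remove_if([](const DepEntry& e) { return e.stale; });
      }
    };

    int main() {
      DepList deps;
      deps.entries.push_back({1, false});
      deps.entries.push_back({2, false});
      deps.remove(1, /*delete_immediately=*/ false); // GC-style: mark only
      deps.remove(2, /*delete_immediately=*/ true);  // immediate unlink
      deps.purge_stale();
      std::printf("remaining entries: %zu\n", deps.entries.size());
      return 0;
    }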
--- a/hotspot/src/share/vm/code/nmethod.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -307,7 +307,7 @@
 
  public:
   // create nmethod with entry_bci
-  static nmethod* new_nmethod(methodHandle method,
+  static nmethod* new_nmethod(const methodHandle& method,
                               int compile_id,
                               int entry_bci,
                               CodeOffsets* offsets,
@@ -327,7 +327,7 @@
 #endif
                              );
 
-  static nmethod* new_native_nmethod(methodHandle method,
+  static nmethod* new_native_nmethod(const methodHandle& method,
                                      int compile_id,
                                      CodeBuffer *code_buffer,
                                      int vep_offset,
--- a/hotspot/src/share/vm/compiler/abstractCompiler.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/compiler/abstractCompiler.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -110,7 +110,7 @@
   // Missing feature tests
   virtual bool supports_native()                 { return true; }
   virtual bool supports_osr   ()                 { return true; }
-  virtual bool can_compile_method(methodHandle method)  { return true; }
+  virtual bool can_compile_method(const methodHandle& method)  { return true; }
 
   // Determine if the current compiler provides an intrinsic
   // for method 'method'. An intrinsic is available if:
@@ -141,7 +141,7 @@
   // disable intrinsics on a per-method basis. In cases (2) and (3) the
   // compilation context is aClass::aMethod and java.lang.ref.Reference::get,
   // respectively.
-  virtual bool is_intrinsic_available(methodHandle method, methodHandle compilation_context) {
+  virtual bool is_intrinsic_available(const methodHandle& method, const methodHandle& compilation_context) {
     return is_intrinsic_supported(method) &&
            !vmIntrinsics::is_disabled_by_flags(method, compilation_context);
   }
@@ -154,7 +154,7 @@
   // by default no intrinsics are supported by a compiler except
   // the ones listed in the method. Overriding methods should conform
   // to this behavior.
-  virtual bool is_intrinsic_supported(methodHandle method) {
+  virtual bool is_intrinsic_supported(const methodHandle& method) {
     return false;
   }
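
In the abstractCompiler.hpp context above, an intrinsic is available only when the compiler supports it and no flag has disabled it; the two checks stay separate so that subclasses override the support test while the flag logic lives in vmIntrinsics. A minimal standalone restatement with stand-in types (the flag check below is only a placeholder for options such as -XX:DisableIntrinsic):

    // Illustrative sketch only, not HotSpot source.
    #include <cstdio>

    struct Method { bool has_intrinsic; };

    static bool is_intrinsic_supported(const Method& m) { return m.has_intrinsic; }
    static bool is_disabled_by_flags(const Method&)     { return false; } // placeholder

    static bool is_intrinsic_available(const Method& m) {
      return is_intrinsic_supported(m) && !is_disabled_by_flags(m);
    }

    int main() {
      Method m{true};
      std::printf("available: %s\n", is_intrinsic_available(m) ? "yes" : "no");
      return 0;
    }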
 
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -777,10 +777,10 @@
 // CompileBroker::compile_method
 //
 // Request compilation of a method.
-void CompileBroker::compile_method_base(methodHandle method,
+void CompileBroker::compile_method_base(const methodHandle& method,
                                         int osr_bci,
                                         int comp_level,
-                                        methodHandle hot_method,
+                                        const methodHandle& hot_method,
                                         int hot_count,
                                         const char* comment,
                                         Thread* thread) {
@@ -790,7 +790,7 @@
   }
 
   guarantee(!method->is_abstract(), "cannot compile abstract methods");
-  assert(method->method_holder()->oop_is_instance(),
+  assert(method->method_holder()->is_instance_klass(),
          "sanity check");
   assert(!method->method_holder()->is_not_initialized(),
          "method holder must be initialized");
@@ -980,12 +980,12 @@
 }
 
 
-nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
+nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
                                        int comp_level,
-                                       methodHandle hot_method, int hot_count,
+                                       const methodHandle& hot_method, int hot_count,
                                        const char* comment, Thread* THREAD) {
   // make sure arguments make sense
-  assert(method->method_holder()->oop_is_instance(), "not an instance method");
+  assert(method->method_holder()->is_instance_klass(), "not an instance method");
   assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
   assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
   assert(!method->method_holder()->is_not_initialized(), "method holder must be initialized");
@@ -1121,7 +1121,7 @@
 // CompileBroker::compilation_is_complete
 //
 // See if compilation of this method is already complete.
-bool CompileBroker::compilation_is_complete(methodHandle method,
+bool CompileBroker::compilation_is_complete(const methodHandle& method,
                                             int          osr_bci,
                                             int          comp_level) {
   bool is_osr = (osr_bci != standard_entry_bci);
@@ -1154,7 +1154,7 @@
  * versa).  This can be remedied by a full queue search to disambiguate
  * cases.  If it is deemed profitable, this may be done.
  */
-bool CompileBroker::compilation_is_in_queue(methodHandle method) {
+bool CompileBroker::compilation_is_in_queue(const methodHandle& method) {
   return method->queued_for_compilation();
 }
 
@@ -1162,7 +1162,7 @@
 // CompileBroker::compilation_is_prohibited
 //
 // See if this compilation is not allowed.
-bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level) {
+bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level) {
   bool is_native = method->is_native();
   // Some compilers may not support the compilation of natives.
   AbstractCompiler *comp = compiler(comp_level);
@@ -1205,7 +1205,7 @@
  * and the ID is not within the specified range, the method is not compiled and 0 is returned.
  * The function also allows generating separate compilation IDs for OSR compilations.
  */
-int CompileBroker::assign_compile_id(methodHandle method, int osr_bci) {
+int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) {
 #ifdef ASSERT
   bool is_osr = (osr_bci != standard_entry_bci);
   int id;
@@ -1240,7 +1240,7 @@
 // CompileBroker::assign_compile_id_unlocked
 //
 // Public wrapper for assign_compile_id that acquires the needed locks
-uint CompileBroker::assign_compile_id_unlocked(Thread* thread, methodHandle method, int osr_bci) {
+uint CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci) {
   MutexLocker locker(MethodCompileQueue_lock, thread);
   return assign_compile_id(method, osr_bci);
 }
@@ -1257,7 +1257,7 @@
 
 // ------------------------------------------------------------------
 // CompileBroker::preload_classes
-void CompileBroker::preload_classes(methodHandle method, TRAPS) {
+void CompileBroker::preload_classes(const methodHandle& method, TRAPS) {
   // Move this code over from c1_Compiler.cpp
   ShouldNotReachHere();
 }
@@ -1270,10 +1270,10 @@
 // compilation.  Add this task to the queue.
 CompileTask* CompileBroker::create_compile_task(CompileQueue* queue,
                                               int           compile_id,
-                                              methodHandle  method,
+                                              const methodHandle&  method,
                                               int           osr_bci,
                                               int           comp_level,
-                                              methodHandle  hot_method,
+                                              const methodHandle&  hot_method,
                                               int           hot_count,
                                               const char*   comment,
                                               bool          blocking) {
@@ -1866,7 +1866,7 @@
 // CompileBroker::set_last_compile
 //
 // Record this compilation for debugging purposes.
-void CompileBroker::set_last_compile(CompilerThread* thread, methodHandle method, bool is_osr, int comp_level) {
+void CompileBroker::set_last_compile(CompilerThread* thread, const methodHandle& method, bool is_osr, int comp_level) {
   ResourceMark rm;
   char* method_name = method->name()->as_C_string();
   strncpy(_last_method_compiled, method_name, CompileBroker::name_buffer_length);
@@ -1952,7 +1952,7 @@
 // CompileBroker::check_break_at
 //
 // Should the compilation break at the current compilation.
-bool CompileBroker::check_break_at(methodHandle method, int compile_id, bool is_osr) {
+bool CompileBroker::check_break_at(const methodHandle& method, int compile_id, bool is_osr) {
   if (CICountOSR && is_osr && (compile_id == CIBreakAtOSR)) {
     return true;
   } else if( CompilerOracle::should_break_at(method) ) { // break when compiling
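
The assign_compile_id comment earlier in this file describes the id policy: when an id range is configured and the assigned id falls outside it, the method is not compiled and 0 is returned, and OSR compilations draw from their own id stream. A hedged, standalone sketch of that policy (hypothetical names, not the HotSpot implementation):

#include <cassert>
#include <cstdint>

// Standard and OSR compilations use separate counters; an id outside the
// configured range means "do not compile" and 0 is returned.
struct CompileIdAssigner {
  uint32_t next_standard_id = 1;
  uint32_t next_osr_id = 1;
  uint32_t range_lo = 0;            // inclusive bounds; defaults accept every id
  uint32_t range_hi = UINT32_MAX;

  uint32_t assign(bool is_osr) {
    uint32_t id = is_osr ? next_osr_id++ : next_standard_id++;
    return (id < range_lo || id > range_hi) ? 0 : id;
  }
};

int main() {
  CompileIdAssigner a;
  a.range_lo = 2; a.range_hi = 3;
  assert(a.assign(false) == 0);   // standard id 1 is below the range: not compiled
  assert(a.assign(false) == 2);   // standard id 2 is in range
  assert(a.assign(true)  == 0);   // OSR ids are counted separately and start at 1
  return 0;
}
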
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -144,7 +144,7 @@
 
   // Compile type Information for print_last_compile() and CompilerCounters
   enum { no_compile, normal_compile, osr_compile, native_compile };
-  static int assign_compile_id (methodHandle method, int osr_bci);
+  static int assign_compile_id (const methodHandle& method, int osr_bci);
 
 
  private:
@@ -217,17 +217,17 @@
 
   static JavaThread* make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, bool compiler_thread, TRAPS);
   static void init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count);
-  static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
-  static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
+  static bool compilation_is_complete  (const methodHandle& method, int osr_bci, int comp_level);
+  static bool compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level);
   static bool is_compile_blocking();
-  static void preload_classes          (methodHandle method, TRAPS);
+  static void preload_classes          (const methodHandle& method, TRAPS);
 
   static CompileTask* create_compile_task(CompileQueue* queue,
                                           int           compile_id,
-                                          methodHandle  method,
+                                          const methodHandle&  method,
                                           int           osr_bci,
                                           int           comp_level,
-                                          methodHandle  hot_method,
+                                          const methodHandle&  hot_method,
                                           int           hot_count,
                                           const char*   comment,
                                           bool          blocking);
@@ -235,16 +235,16 @@
 
   static void invoke_compiler_on_method(CompileTask* task);
   static void post_compile(CompilerThread* thread, CompileTask* task, EventCompilation& event, bool success, ciEnv* ci_env);
-  static void set_last_compile(CompilerThread *thread, methodHandle method, bool is_osr, int comp_level);
+  static void set_last_compile(CompilerThread *thread, const methodHandle& method, bool is_osr, int comp_level);
   static void push_jni_handle_block();
   static void pop_jni_handle_block();
-  static bool check_break_at(methodHandle method, int compile_id, bool is_osr);
+  static bool check_break_at(const methodHandle& method, int compile_id, bool is_osr);
   static void collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task);
 
-  static void compile_method_base(methodHandle method,
+  static void compile_method_base(const methodHandle& method,
                                   int osr_bci,
                                   int comp_level,
-                                  methodHandle hot_method,
+                                  const methodHandle& hot_method,
                                   int hot_count,
                                   const char* comment,
                                   Thread* thread);
@@ -265,7 +265,7 @@
     return NULL;
   }
 
-  static bool compilation_is_in_queue(methodHandle method);
+  static bool compilation_is_in_queue(const methodHandle& method);
   static void print_compile_queues(outputStream* st);
   static int queue_size(int comp_level) {
     CompileQueue *q = compile_queue(comp_level);
@@ -273,15 +273,15 @@
   }
   static void compilation_init();
   static void init_compiler_thread_log();
-  static nmethod* compile_method(methodHandle method,
+  static nmethod* compile_method(const methodHandle& method,
                                  int osr_bci,
                                  int comp_level,
-                                 methodHandle hot_method,
+                                 const methodHandle& hot_method,
                                  int hot_count,
                                  const char* comment, Thread* thread);
 
   // Acquire any needed locks and assign a compile id
-  static uint assign_compile_id_unlocked(Thread* thread, methodHandle method, int osr_bci);
+  static uint assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci);
 
   static void compiler_thread_loop();
   static uint get_compilation_id() { return _compilation_id; }
--- a/hotspot/src/share/vm/compiler/compileTask.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/compiler/compileTask.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,10 +75,10 @@
 
 
 void CompileTask::initialize(int compile_id,
-                             methodHandle method,
+                             const methodHandle& method,
                              int osr_bci,
                              int comp_level,
-                             methodHandle hot_method,
+                             const methodHandle& hot_method,
                              int hot_count,
                              const char* comment,
                              bool is_blocking) {
--- a/hotspot/src/share/vm/compiler/compileTask.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/compiler/compileTask.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -71,8 +71,8 @@
     _lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock");
   }
 
-  void initialize(int compile_id, methodHandle method, int osr_bci, int comp_level,
-                  methodHandle hot_method, int hot_count, const char* comment,
+  void initialize(int compile_id, const methodHandle& method, int osr_bci, int comp_level,
+                  const methodHandle& hot_method, int hot_count, const char* comment,
                   bool is_blocking);
 
   static CompileTask* allocate();
--- a/hotspot/src/share/vm/compiler/compilerOracle.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/compiler/compilerOracle.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -313,7 +313,7 @@
 }
 
 template<typename T>
-bool CompilerOracle::has_option_value(methodHandle method, const char* option, T& value) {
+bool CompilerOracle::has_option_value(const methodHandle& method, const char* option, T& value) {
   if (option_list != NULL) {
     TypedMethodOptionMatcher* m = option_list->match(method, option, get_type_for<T>());
     if (m != NULL) {
@@ -325,19 +325,19 @@
 }
 
 // Explicit instantiation for all OptionTypes supported.
-template bool CompilerOracle::has_option_value<intx>(methodHandle method, const char* option, intx& value);
-template bool CompilerOracle::has_option_value<uintx>(methodHandle method, const char* option, uintx& value);
-template bool CompilerOracle::has_option_value<bool>(methodHandle method, const char* option, bool& value);
-template bool CompilerOracle::has_option_value<ccstr>(methodHandle method, const char* option, ccstr& value);
-template bool CompilerOracle::has_option_value<double>(methodHandle method, const char* option, double& value);
+template bool CompilerOracle::has_option_value<intx>(const methodHandle& method, const char* option, intx& value);
+template bool CompilerOracle::has_option_value<uintx>(const methodHandle& method, const char* option, uintx& value);
+template bool CompilerOracle::has_option_value<bool>(const methodHandle& method, const char* option, bool& value);
+template bool CompilerOracle::has_option_value<ccstr>(const methodHandle& method, const char* option, ccstr& value);
+template bool CompilerOracle::has_option_value<double>(const methodHandle& method, const char* option, double& value);
 
-bool CompilerOracle::has_option_string(methodHandle method, const char* option) {
+bool CompilerOracle::has_option_string(const methodHandle& method, const char* option) {
   bool value = false;
   has_option_value(method, option, value);
   return value;
 }
 
-bool CompilerOracle::should_exclude(methodHandle method, bool& quietly) {
+bool CompilerOracle::should_exclude(const methodHandle& method, bool& quietly) {
   quietly = true;
   if (lists[ExcludeCommand] != NULL) {
     if (lists[ExcludeCommand]->match(method)) {
@@ -352,17 +352,17 @@
   return false;
 }
 
-bool CompilerOracle::should_inline(methodHandle method) {
+bool CompilerOracle::should_inline(const methodHandle& method) {
   return (check_predicate(InlineCommand, method));
 }
 
 // Check both DontInlineCommand and ExcludeCommand here
 // - consistent behavior for all compilers
-bool CompilerOracle::should_not_inline(methodHandle method) {
+bool CompilerOracle::should_not_inline(const methodHandle& method) {
   return check_predicate(DontInlineCommand, method) || check_predicate(ExcludeCommand, method);
 }
 
-bool CompilerOracle::should_print(methodHandle method) {
+bool CompilerOracle::should_print(const methodHandle& method) {
   return check_predicate(PrintCommand, method);
 }
 
@@ -370,13 +370,13 @@
   return lists[PrintCommand] != NULL;
 }
 
-bool CompilerOracle::should_log(methodHandle method) {
+bool CompilerOracle::should_log(const methodHandle& method) {
   if (!LogCompilation)            return false;
   if (lists[LogCommand] == NULL)  return true;  // by default, log all
   return (check_predicate(LogCommand, method));
 }
 
-bool CompilerOracle::should_break_at(methodHandle method) {
+bool CompilerOracle::should_break_at(const methodHandle& method) {
   return check_predicate(BreakCommand, method);
 }
 
@@ -756,7 +756,7 @@
   stream.cr();
 }
 
-void CompilerOracle::append_exclude_to_file(methodHandle method) {
+void CompilerOracle::append_exclude_to_file(const methodHandle& method) {
   assert(has_command_file(), "command file must be specified");
   fileStream stream(fopen(cc_file(), "at"));
   stream.print("exclude ");
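
has_option_value<T> above is a template defined in the .cpp file and then explicitly instantiated for each supported option type, so callers in other translation units can link against those instantiations without the definition living in the header. A small self-contained sketch of the same pattern, with a hypothetical Option table standing in for TypedMethodOptionMatcher:

#include <cstdio>
#include <cstring>

struct Option { const char* name; double as_double; bool as_bool; };

static const Option g_options[] = {
  { "InlineFreq", 0.75, false },
  { "Verbose",    0.0,  true  },
};

template <typename T> T extract(const Option& o);
template <> double extract<double>(const Option& o) { return o.as_double; }
template <> bool   extract<bool>(const Option& o)   { return o.as_bool; }

template <typename T>
bool has_option_value(const char* name, T& value) {
  for (const Option& o : g_options) {
    if (std::strcmp(o.name, name) == 0) { value = extract<T>(o); return true; }
  }
  return false;                 // value is left unchanged, as the header comment states
}

// Explicit instantiations, mirroring the five added for const methodHandle& above.
template bool has_option_value<double>(const char*, double&);
template bool has_option_value<bool>(const char*, bool&);

int main() {
  double freq = 0.0;
  if (has_option_value("InlineFreq", freq)) printf("InlineFreq = %.2f\n", freq);
  return 0;
}
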
--- a/hotspot/src/share/vm/compiler/compilerOracle.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/compiler/compilerOracle.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,30 +46,30 @@
   static void parse_from_file();
 
   // Tells whether we are to exclude compilation of a method
-  static bool should_exclude(methodHandle method, bool& quietly);
+  static bool should_exclude(const methodHandle& method, bool& quietly);
 
   // Tells whether we want to inline this method
-  static bool should_inline(methodHandle method);
+  static bool should_inline(const methodHandle& method);
 
   // Tells whether we want to disallow inlining of this method
-  static bool should_not_inline(methodHandle method);
+  static bool should_not_inline(const methodHandle& method);
 
   // Tells whether we should print the assembly for this method
-  static bool should_print(methodHandle method);
+  static bool should_print(const methodHandle& method);
 
   // Tells whether we should log the compilation data for this method
-  static bool should_log(methodHandle method);
+  static bool should_log(const methodHandle& method);
 
   // Tells whether to break when compiling method
-  static bool should_break_at(methodHandle method);
+  static bool should_break_at(const methodHandle& method);
 
   // Check to see if this method has option set for it
-  static bool has_option_string(methodHandle method, const char * option);
+  static bool has_option_string(const methodHandle& method, const char * option);
 
   // Check if method has option and value set. If yes, overwrite value and return true,
   // otherwise leave value unchanged and return false.
   template<typename T>
-  static bool has_option_value(methodHandle method, const char* option, T& value);
+  static bool has_option_value(const methodHandle& method, const char* option, T& value);
 
   // Reads from string instead of file
   static void parse_from_string(const char* command_string, void (*parser)(char*));
@@ -79,7 +79,7 @@
 
   // For updating the oracle file
   static void append_comment_to_file(const char* message);
-  static void append_exclude_to_file(methodHandle method);
+  static void append_exclude_to_file(const methodHandle& method);
 
   // Tells whether there are any methods to print for print_method_statistics()
   static bool should_print_methods();
--- a/hotspot/src/share/vm/compiler/disassembler.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/compiler/disassembler.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -86,6 +86,12 @@
   os::jvm_path(buf, sizeof(buf));
   int jvm_offset = -1;
   int lib_offset = -1;
+#ifdef STATIC_BUILD
+  char* p = strrchr(buf, '/');
+  *p = '\0';
+  strcat(p, "/lib/");
+  lib_offset = jvm_offset = strlen(buf);
+#else
   {
     // Match "jvm[^/]*" in jvm_path.
     const char* base = buf;
@@ -94,6 +100,7 @@
     p = strstr(p ? p : base, "jvm");
     if (p != NULL)  jvm_offset = p - base;
   }
+#endif
   // Find the disassembler shared library.
   // Search for several paths derived from libjvm, in this order:
   // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so  (for compatibility)
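
In the STATIC_BUILD branch added above there is no separate libjvm file name to pattern-match, so the code truncates the JVM path at its last '/' and appends "/lib/", remembering the offset at which the hsdis library name will later be written. A standalone sketch of that path surgery, with hypothetical example paths:

#include <cstdio>
#include <cstring>

int main() {
  char buf[256];
  strcpy(buf, "/opt/jdk/bin/java");        // stand-in for what os::jvm_path() fills in
  char* p = strrchr(buf, '/');             // points at the final "/java"
  if (p != NULL) {
    *p = '\0';                             // buf == "/opt/jdk/bin"
    strcat(buf, "/lib/");                  // buf == "/opt/jdk/bin/lib/"
  }
  size_t lib_offset = strlen(buf);         // both jvm_offset and lib_offset in the patch
  strcpy(buf + lib_offset, "hsdis-amd64.so");
  printf("%s\n", buf);                     // prints /opt/jdk/bin/lib/hsdis-amd64.so
  return 0;
}
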
--- a/hotspot/src/share/vm/compiler/methodMatcher.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/compiler/methodMatcher.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -306,7 +306,7 @@
   }
 }
 
-bool MethodMatcher::matches(methodHandle method) const {
+bool MethodMatcher::matches(const methodHandle& method) const {
   Symbol* class_name  = method->method_holder()->name();
   Symbol* method_name = method->name();
   Symbol* signature = method->signature();
--- a/hotspot/src/share/vm/compiler/methodMatcher.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/compiler/methodMatcher.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -60,7 +60,7 @@
   void init(Symbol* class_name, Mode class_mode, Symbol* method_name, Mode method_mode, Symbol* signature);
   static void parse_method_pattern(char*& line, const char*& error_msg, MethodMatcher* m);
   static void print_symbol(outputStream* st, Symbol* h, Mode mode);
-  bool matches(methodHandle method) const;
+  bool matches(const methodHandle& method) const;
   void print_base(outputStream* st);
 
  private:
@@ -101,7 +101,7 @@
     return bm;
   }
 
-  bool match(methodHandle method) {
+  bool match(const methodHandle& method) {
     for (BasicMatcher* current = this; current != NULL; current = current->next()) {
       if (current->matches(method)) {
         return true;
--- a/hotspot/src/share/vm/compiler/oopMap.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/compiler/oopMap.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -73,8 +73,8 @@
 
   // Constructors
   OopMapValue () { set_value(0); set_content_reg(VMRegImpl::Bad()); }
-  OopMapValue (VMReg reg, oop_types t) { set_reg_type(reg,t); }
-  OopMapValue (VMReg reg, oop_types t, VMReg reg2) { set_reg_type(reg,t); set_content_reg(reg2); }
+  OopMapValue (VMReg reg, oop_types t) { set_reg_type(reg, t); set_content_reg(VMRegImpl::Bad()); }
+  OopMapValue (VMReg reg, oop_types t, VMReg reg2) { set_reg_type(reg, t); set_content_reg(reg2); }
   OopMapValue (CompressedReadStream* stream) { read_from(stream); }
 
   // Archiving
@@ -87,7 +87,7 @@
 
   void read_from(CompressedReadStream* stream) {
     set_value(stream->read_int());
-    if(is_callee_saved() || is_derived_oop()) {
+    if (is_callee_saved() || is_derived_oop()) {
       set_content_reg(VMRegImpl::as_VMReg(stream->read_int(), true));
     }
   }
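
The OopMapValue change above makes the two-argument constructor initialize the content register (to VMRegImpl::Bad()) instead of leaving it unset. A minimal sketch of the bug class being fixed, using a hypothetical RegValue type:

#include <cstdio>

struct RegValue {
  int _reg;
  int _content_reg;

  // Before the fix: _content_reg was left uninitialized on the two-argument path,
  // so later reads saw indeterminate data. After the fix: every constructor
  // establishes the full invariant.
  RegValue(int reg)                  : _reg(reg), _content_reg(-1) {}   // -1 ~ VMRegImpl::Bad()
  RegValue(int reg, int content_reg) : _reg(reg), _content_reg(content_reg) {}
};

int main() {
  RegValue v(3);
  printf("reg=%d content_reg=%d\n", v._reg, v._content_reg);  // content_reg is defined: -1
  return 0;
}
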
--- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -73,11 +73,7 @@
 }
 
 // Constructor
-CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
-  MemRegion mr, bool use_adaptive_freelists,
-  FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
-  _dictionaryChoice(dictionaryChoice),
-  _adaptive_freelists(use_adaptive_freelists),
+CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
   _bt(bs, mr),
   // free list locks are in the range of values taken by _lockRank
   // This range currently is [_leaf+2, _leaf+3]
@@ -100,48 +96,17 @@
          "FreeChunk is larger than expected");
   _bt.set_space(this);
   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
-  // We have all of "mr", all of which we place in the dictionary
-  // as one big chunk. We'll need to decide here which of several
-  // possible alternative dictionary implementations to use. For
-  // now the choice is easy, since we have only one working
-  // implementation, namely, the simple binary tree (splaying
-  // temporarily disabled).
-  switch (dictionaryChoice) {
-    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
-      _dictionary = new AFLBinaryTreeDictionary(mr);
-      break;
-    case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
-    case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
-    default:
-      warning("dictionaryChoice: selected option not understood; using"
-              " default BinaryTreeDictionary implementation instead.");
-  }
+
+  _dictionary = new AFLBinaryTreeDictionary(mr);
+
   assert(_dictionary != NULL, "CMS dictionary initialization");
   // The indexed free lists are initially all empty and are lazily
   // filled in on demand. Initialize the array elements to NULL.
   initializeIndexedFreeListArray();
 
-  // Not using adaptive free lists assumes that allocation is first
-  // from the linAB's.  Also a cms perm gen which can be compacted
-  // has to have the klass's klassKlass allocated at a lower
-  // address in the heap than the klass so that the klassKlass is
-  // moved to its new location before the klass is moved.
-  // Set the _refillSize for the linear allocation blocks
-  if (!use_adaptive_freelists) {
-    FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
-                                           FreeBlockDictionary<FreeChunk>::atLeast);
-    // The small linAB initially has all the space and will allocate
-    // a chunk of any size.
-    HeapWord* addr = (HeapWord*) fc;
-    _smallLinearAllocBlock.set(addr, fc->size() ,
-      1024*SmallForLinearAlloc, fc->size());
-    // Note that _unallocated_block is not updated here.
-    // Allocations from the linear allocation block should
-    // update it.
-  } else {
-    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
-                               SmallForLinearAlloc);
-  }
+  _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
+                             SmallForLinearAlloc);
+
   // CMSIndexedFreeListReplenish should be at least 1
   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   _promoInfo.setSpace(this);
@@ -297,22 +262,7 @@
   MemRegion mr(compaction_top(), end());
   reset(mr);
   // Now refill the linear allocation block(s) if possible.
-  if (_adaptive_freelists) {
-    refillLinearAllocBlocksIfNeeded();
-  } else {
-    // Place as much of mr in the linAB as we can get,
-    // provided it was big enough to go into the dictionary.
-    FreeChunk* fc = dictionary()->find_largest_dict();
-    if (fc != NULL) {
-      assert(fc->size() == mr.word_size(),
-             "Why was the chunk broken up?");
-      removeChunkFromDictionary(fc);
-      HeapWord* addr = (HeapWord*) fc;
-      _smallLinearAllocBlock.set(addr, fc->size() ,
-        1024*SmallForLinearAlloc, fc->size());
-      // Note that _unallocated_block is not updated here.
-    }
-  }
+  refillLinearAllocBlocksIfNeeded();
 }
 
 // Walks the entire dictionary, returning a coterminal
@@ -445,8 +395,7 @@
 
   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 
-  st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
-               _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
+  st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 }
 
 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
@@ -617,23 +566,9 @@
       // Now, take this new chunk and add it to the free blocks.
       // Note that the BOT has not yet been updated for this block.
       size_t newFcSize = pointer_delta(value, prevEnd);
-      // XXX This is REALLY UGLY and should be fixed up. XXX
-      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
-        // Mark the boundary of the new block in BOT
-        _bt.mark_block(prevEnd, value);
-        // put it all in the linAB
-        MutexLockerEx x(parDictionaryAllocLock(),
-                        Mutex::_no_safepoint_check_flag);
-        _smallLinearAllocBlock._ptr = prevEnd;
-        _smallLinearAllocBlock._word_size = newFcSize;
-        repairLinearAllocBlock(&_smallLinearAllocBlock);
-        // Births of chunks put into a LinAB are not recorded.  Births
-        // of chunks as they are allocated out of a LinAB are.
-      } else {
-        // Add the block to the free lists, if possible coalescing it
-        // with the last free block, and update the BOT and census data.
-        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
-      }
+      // Add the block to the free lists, if possible coalescing it
+      // with the last free block, and update the BOT and census data.
+      addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
     }
   }
 }
@@ -1177,11 +1112,7 @@
   assert(size == adjustObjectSize(size),
          "use adjustObjectSize() before calling into allocate()");
 
-  if (_adaptive_freelists) {
-    res = allocate_adaptive_freelists(size);
-  } else {  // non-adaptive free lists
-    res = allocate_non_adaptive_freelists(size);
-  }
+  res = allocate_adaptive_freelists(size);
 
   if (res != NULL) {
     // check that res does lie in this space!
@@ -1203,27 +1134,6 @@
   return res;
 }
 
-HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
-  HeapWord* res = NULL;
-  // try and use linear allocation for smaller blocks
-  if (size < _smallLinearAllocBlock._allocation_size_limit) {
-    // if successful, the following also adjusts block offset table
-    res = getChunkFromSmallLinearAllocBlock(size);
-  }
-  // Else triage to indexed lists for smaller sizes
-  if (res == NULL) {
-    if (size < SmallForDictionary) {
-      res = (HeapWord*) getChunkFromIndexedFreeList(size);
-    } else {
-      // else get it from the big dictionary; if even this doesn't
-      // work we are out of luck.
-      res = (HeapWord*)getChunkFromDictionaryExact(size);
-    }
-  }
-
-  return res;
-}
-
 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
   assert_lock_strong(freelistLock());
   HeapWord* res = NULL;
@@ -1281,9 +1191,6 @@
   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
   // is added because the dictionary may over-allocate to avoid fragmentation.
   size_t space = obj_size;
-  if (!_adaptive_freelists) {
-    space = MAX2(space, _smallLinearAllocBlock._refillSize);
-  }
   space += _promoInfo.refillSize() + 2 * MinChunkSize;
   return space;
 }
@@ -1698,11 +1605,7 @@
   size_t size = fc->size();
   _bt.verify_single_block((HeapWord*) fc, size);
   _bt.verify_not_unallocated((HeapWord*) fc, size);
-  if (_adaptive_freelists) {
-    _indexedFreeList[size].return_chunk_at_tail(fc);
-  } else {
-    _indexedFreeList[size].return_chunk_at_head(fc);
-  }
+  _indexedFreeList[size].return_chunk_at_tail(fc);
 #ifndef PRODUCT
   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
      _indexedFreeList[size].verify_stats();
@@ -1931,10 +1834,6 @@
 void
 CompactibleFreeListSpace::gc_epilogue() {
   assert_locked();
-  if (PrintGCDetails && Verbose && !_adaptive_freelists) {
-    if (_smallLinearAllocBlock._word_size == 0)
-      warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
-  }
   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
   _promoInfo.stopTrackingPromotions();
   repairLinearAllocationBlocks();
@@ -2060,13 +1959,6 @@
   }
 }
 
-// Support for concurrent collection policy decisions.
-bool CompactibleFreeListSpace::should_concurrent_collect() const {
-  // In the future we might want to add in fragmentation stats --
-  // including erosion of the "mountain" into this decision as well.
-  return !adaptive_freelists() && linearAllocationWouldFail();
-}
-
 // Support for compaction
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
   scan_and_forward(this, cp);
--- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -138,15 +138,13 @@
   // Linear allocation blocks
   LinearAllocBlock _smallLinearAllocBlock;
 
-  FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
   AFLBinaryTreeDictionary* _dictionary;    // Pointer to dictionary for large size blocks
 
   // Indexed array for small size blocks
   AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
 
   // Allocation strategy
-  bool       _fitStrategy;        // Use best fit strategy
-  bool       _adaptive_freelists; // Use adaptive freelists
+  bool _fitStrategy;  // Use best fit strategy
 
   // This is an address close to the largest free chunk in the heap.
   // It is currently assumed to be at the end of the heap.  Free
@@ -204,10 +202,6 @@
   // strategy that attempts to keep the needed number of chunks in each
   // indexed free lists.
   HeapWord* allocate_adaptive_freelists(size_t size);
-  // Allocate from the linear allocation buffers first.  This allocation
-  // strategy assumes maximal coalescing can maintain chunks large enough
-  // to be used as linear allocation buffers.
-  HeapWord* allocate_non_adaptive_freelists(size_t size);
 
   // Gets a chunk from the linear allocation block (LinAB).  If there
   // is not enough space in the LinAB, refills it.
@@ -333,9 +327,7 @@
 
  public:
   // Constructor
-  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
-                           bool use_adaptive_freelists,
-                           FreeBlockDictionary<FreeChunk>::DictionaryChoice);
+  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr);
   // Accessors
   bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
   FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
@@ -349,8 +341,6 @@
   // chunk exists, return NULL.
   FreeChunk* find_chunk_at_end();
 
-  bool adaptive_freelists() const { return _adaptive_freelists; }
-
   void set_collector(CMSCollector* collector) { _collector = collector; }
 
   // Support for parallelization of rescan and marking.
@@ -536,9 +526,6 @@
   void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
               bool coalesced);
 
-  // Support for decisions regarding concurrent collection policy.
-  bool should_concurrent_collect() const;
-
   // Support for compaction.
   void prepare_for_compaction(CompactPoint* cp);
   void adjust_pointers();
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -190,9 +190,7 @@
 };
 
 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
-     ReservedSpace rs, size_t initial_byte_size,
-     CardTableRS* ct, bool use_adaptive_freelists,
-     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
+     ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
   CardGeneration(rs, initial_byte_size, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
   _did_compact(false)
@@ -208,9 +206,7 @@
     _numWordsAllocated = 0;
   )
 
-  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
-                                           use_adaptive_freelists,
-                                           dictionaryChoice);
+  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
   _cmsSpace->_old_gen = this;
 
@@ -1312,13 +1308,6 @@
     }
     return true;
   }
-  if (_cmsSpace->should_concurrent_collect()) {
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print(" %s: collect because cmsSpace says so ",
-        short_name());
-    }
-    return true;
-  }
   return false;
 }
 
@@ -1766,9 +1755,8 @@
     MutexLockerEx hl(Heap_lock, safepoint_check);
     FreelistLocker fll(this);
     MutexLockerEx x(CGC_lock, safepoint_check);
-    if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
-      // The foreground collector is active or we're
-      // not using asynchronous collections.  Skip this
+    if (_foregroundGCIsActive) {
+      // The foreground collector is active. Skip this
       // background collection.
       assert(!_foregroundGCShouldWait, "Should be clear");
       return;
@@ -1795,7 +1783,7 @@
   }
 
   // Used for PrintGC
-  size_t prev_used;
+  size_t prev_used = 0;
   if (PrintGC && Verbose) {
     prev_used = _cmsGen->used();
   }
@@ -5214,9 +5202,8 @@
 
   verify_work_stacks_empty();
   // Restore evacuated mark words, if any, used for overflow list links
-  if (!CMSOverflowEarlyRestoration) {
-    restore_preserved_marks_if_any();
-  }
+  restore_preserved_marks_if_any();
+
   verify_overflow_empty();
 }
 
@@ -6186,17 +6173,8 @@
     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
     assert(_collector->overflow_list_is_empty(),
            "overflow list was drained above");
-    // We could restore evacuated mark words, if any, used for
-    // overflow list links here because the overflow list is
-    // provably empty here. That would reduce the maximum
-    // size requirements for preserved_{oop,mark}_stack.
-    // But we'll just postpone it until we are all done
-    // so we can just stream through.
-    if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
-      _collector->restore_preserved_marks_if_any();
-      assert(_collector->no_preserved_marks(), "No preserved marks");
-    }
-    assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
+
+    assert(_collector->no_preserved_marks(),
            "All preserved marks should have been restored above");
   }
 }
@@ -7372,14 +7350,6 @@
 
   set_freeFinger(freeFinger);
   set_freeRangeInFreeLists(freeRangeInFreeLists);
-  if (CMSTestInFreeList) {
-    if (freeRangeInFreeLists) {
-      FreeChunk* fc = (FreeChunk*) freeFinger;
-      assert(fc->is_free(), "A chunk on the free list should be free.");
-      assert(fc->size() > 0, "Free range should have a size");
-      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
-    }
-  }
 }
 
 // Note that the sweeper runs concurrently with mutators. Thus,
@@ -7532,12 +7502,7 @@
 
 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
   const size_t size = fc->size();
-  // Chunks that cannot be coalesced are not in the
-  // free lists.
-  if (CMSTestInFreeList && !fc->cantCoalesce()) {
-    assert(_sp->verify_chunk_in_free_list(fc),
-      "free chunk should be in free lists");
-  }
+
   // a chunk that is already free, should not have been
   // marked in the bit map
   HeapWord* const addr = (HeapWord*) fc;
@@ -7550,57 +7515,8 @@
   // See the definition of cantCoalesce().
   if (!fc->cantCoalesce()) {
     // This chunk can potentially be coalesced.
-    if (_sp->adaptive_freelists()) {
-      // All the work is done in
-      do_post_free_or_garbage_chunk(fc, size);
-    } else {  // Not adaptive free lists
-      // this is a free chunk that can potentially be coalesced by the sweeper;
-      if (!inFreeRange()) {
-        // if the next chunk is a free block that can't be coalesced
-        // it doesn't make sense to remove this chunk from the free lists
-        FreeChunk* nextChunk = (FreeChunk*)(addr + size);
-        assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
-        if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
-            nextChunk->is_free()               &&     // ... which is free...
-            nextChunk->cantCoalesce()) {             // ... but can't be coalesced
-          // nothing to do
-        } else {
-          // Potentially the start of a new free range:
-          // Don't eagerly remove it from the free lists.
-          // No need to remove it if it will just be put
-          // back again.  (Also from a pragmatic point of view
-          // if it is a free block in a region that is beyond
-          // any allocated blocks, an assertion will fail)
-          // Remember the start of a free run.
-          initialize_free_range(addr, true);
-          // end - can coalesce with next chunk
-        }
-      } else {
-        // the midst of a free range, we are coalescing
-        print_free_block_coalesced(fc);
-        if (CMSTraceSweeper) {
-          gclog_or_tty->print("  -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
-        }
-        // remove it from the free lists
-        _sp->removeFreeChunkFromFreeLists(fc);
-        set_lastFreeRangeCoalesced(true);
-        // If the chunk is being coalesced and the current free range is
-        // in the free lists, remove the current free range so that it
-        // will be returned to the free lists in its entirety - all
-        // the coalesced pieces included.
-        if (freeRangeInFreeLists()) {
-          FreeChunk* ffc = (FreeChunk*) freeFinger();
-          assert(ffc->size() == pointer_delta(addr, freeFinger()),
-            "Size of free range is inconsistent with chunk size.");
-          if (CMSTestInFreeList) {
-            assert(_sp->verify_chunk_in_free_list(ffc),
-              "free range is not in free lists");
-          }
-          _sp->removeFreeChunkFromFreeLists(ffc);
-          set_freeRangeInFreeLists(false);
-        }
-      }
-    }
+    // All the work is done in do_post_free_or_garbage_chunk().
+    do_post_free_or_garbage_chunk(fc, size);
     // Note that if the chunk is not coalescable (the else arm
     // below), we unconditionally flush, without needing to do
     // a "lookahead," as we do below.
@@ -7626,46 +7542,11 @@
   HeapWord* const addr = (HeapWord*) fc;
   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
 
-  if (_sp->adaptive_freelists()) {
-    // Verify that the bit map has no bits marked between
-    // addr and purported end of just dead object.
-    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
-
-    do_post_free_or_garbage_chunk(fc, size);
-  } else {
-    if (!inFreeRange()) {
-      // start of a new free range
-      assert(size > 0, "A free range should have a size");
-      initialize_free_range(addr, false);
-    } else {
-      // this will be swept up when we hit the end of the
-      // free range
-      if (CMSTraceSweeper) {
-        gclog_or_tty->print("  -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
-      }
-      // If the chunk is being coalesced and the current free range is
-      // in the free lists, remove the current free range so that it
-      // will be returned to the free lists in its entirety - all
-      // the coalesced pieces included.
-      if (freeRangeInFreeLists()) {
-        FreeChunk* ffc = (FreeChunk*)freeFinger();
-        assert(ffc->size() == pointer_delta(addr, freeFinger()),
-          "Size of free range is inconsistent with chunk size.");
-        if (CMSTestInFreeList) {
-          assert(_sp->verify_chunk_in_free_list(ffc),
-            "free range is not in free lists");
-        }
-        _sp->removeFreeChunkFromFreeLists(ffc);
-        set_freeRangeInFreeLists(false);
-      }
-      set_lastFreeRangeCoalesced(true);
-    }
-    // this will be swept up when we hit the end of the free range
-
-    // Verify that the bit map has no bits marked between
-    // addr and purported end of just dead object.
-    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
-  }
+  // Verify that the bit map has no bits marked between
+  // addr and purported end of just dead object.
+  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
+  do_post_free_or_garbage_chunk(fc, size);
+
   assert(_limit >= addr + size,
          "A freshly garbage chunk can't possibly straddle over _limit");
   if (inFreeRange()) lookahead_and_flush(fc, size);
@@ -7727,11 +7608,7 @@
   // do_post_free_or_garbage_chunk() should only be called in the case
   // of the adaptive free list allocator.
   const bool fcInFreeLists = fc->is_free();
-  assert(_sp->adaptive_freelists(), "Should only be used in this case.");
   assert((HeapWord*)fc <= _limit, "sweep invariant");
-  if (CMSTestInFreeList && fcInFreeLists) {
-    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
-  }
 
   if (CMSTraceSweeper) {
     gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
@@ -7739,7 +7616,7 @@
 
   HeapWord* const fc_addr = (HeapWord*) fc;
 
-  bool coalesce;
+  bool coalesce = false;
   const size_t left  = pointer_delta(fc_addr, freeFinger());
   const size_t right = chunkSize;
   switch (FLSCoalescePolicy) {
@@ -7784,10 +7661,6 @@
       FreeChunk* const ffc = (FreeChunk*)freeFinger();
       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
         "Size of free range is inconsistent with chunk size.");
-      if (CMSTestInFreeList) {
-        assert(_sp->verify_chunk_in_free_list(ffc),
-          "Chunk is not in free lists");
-      }
       _sp->coalDeath(ffc->size());
       _sp->removeFreeChunkFromFreeLists(ffc);
       set_freeRangeInFreeLists(false);
@@ -7856,12 +7729,6 @@
   assert(size > 0,
     "A zero sized chunk cannot be added to the free lists.");
   if (!freeRangeInFreeLists()) {
-    if (CMSTestInFreeList) {
-      FreeChunk* fc = (FreeChunk*) chunk;
-      fc->set_size(size);
-      assert(!_sp->verify_chunk_in_free_list(fc),
-        "chunk should not be in free lists yet");
-    }
     if (CMSTraceSweeper) {
       gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
                     p2i(chunk), size);
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1076,10 +1076,7 @@
   void assert_correct_size_change_locking();
 
  public:
-  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
-                                CardTableRS* ct,
-                                bool use_adaptive_freelists,
-                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);
+  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct);
 
   // Accessors
   CMSCollector* collector() const { return _collector; }
@@ -1121,12 +1118,6 @@
   // over-rides
   MemRegion used_region_at_save_marks() const;
 
-  // Does a "full" (forced) collection invoked on this generation collect
-  // the young generation as well?
-  virtual bool full_collects_young_generation() const {
-    return !ScavengeBeforeFullGC;
-  }
-
   // Adjust quantities in the generation affected by
   // the compaction.
   void reset_after_compaction();
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -79,9 +79,6 @@
   static void makeSurrogateLockerThread(TRAPS);
   static SurrogateLockerThread* slt() { return _slt; }
 
-  // Tester
-  bool is_ConcurrentGC_thread() const { return true;       }
-
   static void threads_do(ThreadClosure* tc);
 
   // Printing
--- a/hotspot/src/share/vm/gc/cms/vmCMSOperations.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/vmCMSOperations.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -138,7 +138,6 @@
     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */)
   {
     assert(FullGCCount_lock != NULL, "Error");
-    assert(UseAsyncConcMarkSweepGC, "Else will hang caller");
   }
   ~VM_GenCollectFullConcurrent() {}
   virtual VMOp_Type type() const { return VMOp_GenCollectFullConcurrent; }
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -30,7 +30,8 @@
 #include "runtime/java.hpp"
 
 ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
-  _threads(NULL), _n_threads(0),
+  _threads(NULL),
+  _sample_thread(NULL),
   _hot_card_cache(g1h)
 {
   // Ergonomically select initial concurrent refinement parameters
@@ -58,12 +59,10 @@
     return NULL;
   }
   cg1r->_n_worker_threads = thread_num();
-  // We need one extra thread to do the young gen rset size sampling.
-  cg1r->_n_threads = cg1r->_n_worker_threads + 1;
 
   cg1r->reset_threshold_step();
 
-  cg1r->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(ConcurrentG1RefineThread*, cg1r->_n_threads, mtGC);
+  cg1r->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(ConcurrentG1RefineThread*, cg1r->_n_worker_threads, mtGC);
   if (cg1r->_threads == NULL) {
     *ecode = JNI_ENOMEM;
     vm_shutdown_during_initialization("Could not allocate an array for ConcurrentG1RefineThread");
@@ -73,7 +72,7 @@
   uint worker_id_offset = DirtyCardQueueSet::num_par_ids();
 
   ConcurrentG1RefineThread *next = NULL;
-  for (uint i = cg1r->_n_threads - 1; i != UINT_MAX; i--) {
+  for (uint i = cg1r->_n_worker_threads - 1; i != UINT_MAX; i--) {
     ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(cg1r, next, refine_closure, worker_id_offset, i);
     assert(t != NULL, "Conc refine should have been created");
     if (t->osthread() == NULL) {
@@ -86,6 +85,14 @@
     cg1r->_threads[i] = t;
     next = t;
   }
+
+  cg1r->_sample_thread = new G1YoungRemSetSamplingThread();
+  if (cg1r->_sample_thread->osthread() == NULL) {
+    *ecode = JNI_ENOMEM;
+    vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");
+    return NULL;
+  }
+
   *ecode = JNI_OK;
   return cg1r;
 }
@@ -103,44 +110,36 @@
 }
 
 void ConcurrentG1Refine::stop() {
-  if (_threads != NULL) {
-    for (uint i = 0; i < _n_threads; i++) {
-      _threads[i]->stop();
-    }
+  for (uint i = 0; i < _n_worker_threads; i++) {
+    _threads[i]->stop();
   }
+  _sample_thread->stop();
 }
 
 void ConcurrentG1Refine::reinitialize_threads() {
   reset_threshold_step();
-  if (_threads != NULL) {
-    for (uint i = 0; i < _n_threads; i++) {
-      _threads[i]->initialize();
-    }
+  for (uint i = 0; i < _n_worker_threads; i++) {
+    _threads[i]->initialize();
   }
 }
 
 ConcurrentG1Refine::~ConcurrentG1Refine() {
-  if (_threads != NULL) {
-    for (uint i = 0; i < _n_threads; i++) {
-      delete _threads[i];
-    }
-    FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
+  for (uint i = 0; i < _n_worker_threads; i++) {
+    delete _threads[i];
   }
+  FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
+
+  delete _sample_thread;
 }
 
 void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
-  if (_threads != NULL) {
-    for (uint i = 0; i < _n_threads; i++) {
-      tc->do_thread(_threads[i]);
-    }
-  }
+  worker_threads_do(tc);
+  tc->do_thread(_sample_thread);
 }
 
 void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
-  if (_threads != NULL) {
-    for (uint i = 0; i < worker_thread_num(); i++) {
-      tc->do_thread(_threads[i]);
-    }
+  for (uint i = 0; i < worker_thread_num(); i++) {
+    tc->do_thread(_threads[i]);
   }
 }
 
@@ -149,12 +148,10 @@
 }
 
 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
-  for (uint i = 0; i < _n_threads; ++i) {
+  for (uint i = 0; i < _n_worker_threads; ++i) {
     _threads[i]->print_on(st);
     st->cr();
   }
+  _sample_thread->print_on(st);
+  st->cr();
 }
-
-ConcurrentG1RefineThread * ConcurrentG1Refine::sampling_thread() const {
-  return _threads[worker_thread_num()];
-}
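
The refactoring above stops storing the remembered-set sampling thread in the worker array: the array is sized by _n_worker_threads only, the sampler becomes its own field, and threads_do() visits the workers and then the sampler. A compact, self-contained sketch of that ownership split, with hypothetical Refine/Worker/Sampler types:

#include <vector>

struct Thread { virtual ~Thread() {} };
struct Worker  : Thread {};
struct Sampler : Thread {};

struct ThreadClosure { virtual ~ThreadClosure() {} virtual void do_thread(Thread* t) = 0; };

class Refine {
  std::vector<Worker*> _workers;
  Sampler* _sampler;
 public:
  explicit Refine(int n) : _sampler(new Sampler()) {
    for (int i = 0; i < n; i++) _workers.push_back(new Worker());
  }
  ~Refine() {
    for (Worker* w : _workers) delete w;
    delete _sampler;
  }
  void worker_threads_do(ThreadClosure* tc) {
    for (Worker* w : _workers) tc->do_thread(w);
  }
  // Visits the workers and then the sampling thread, as in the refactored
  // ConcurrentG1Refine::threads_do above.
  void threads_do(ThreadClosure* tc) {
    worker_threads_do(tc);
    tc->do_thread(_sampler);
  }
};

struct Counter : ThreadClosure { int n = 0; void do_thread(Thread*) { ++n; } };

int main() {
  Refine r(4);
  Counter c;
  r.threads_do(&c);
  return (c.n == 5) ? 0 : 1;   // 4 workers + 1 sampler
}
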
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_G1_CONCURRENTG1REFINE_HPP
 
 #include "gc/g1/g1HotCardCache.hpp"
+#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -39,8 +40,9 @@
 class DirtyCardQueue;
 
 class ConcurrentG1Refine: public CHeapObj<mtGC> {
+  G1YoungRemSetSamplingThread* _sample_thread;
+
   ConcurrentG1RefineThread** _threads;
-  uint _n_threads;
   uint _n_worker_threads;
  /*
   * The value of the update buffer queue length falls into one of 3 zones:
@@ -91,8 +93,8 @@
   // Iterate over all worker refinement threads
   void worker_threads_do(ThreadClosure * tc);
 
-  // The RS sampling thread
-  ConcurrentG1RefineThread * sampling_thread() const;
+  // The RS sampling thread has nothing to do with refinement, but is here for now.
+  G1YoungRemSetSamplingThread * sampling_thread() const { return _sample_thread; }
 
   static uint thread_num();
 
@@ -106,7 +108,6 @@
   int yellow_zone() const     { return _yellow_zone; }
   int red_zone() const        { return _red_zone;    }
 
-  uint total_thread_num() const  { return _n_threads;        }
   uint worker_thread_num() const { return _n_worker_threads; }
 
   int thread_threshold_step() const { return _thread_threshold_step; }
--- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -50,9 +50,8 @@
   // Each thread has its own monitor. The i-th thread is responsible for signaling
   // to thread i+1 if the number of buffers in the queue exceeds a threshold for this
   // thread. Monitors are also used to wake up the threads during termination.
-  // The 0th worker in notified by mutator threads and has a special monitor.
-  // The last worker is used for young gen rset size sampling.
-  if (worker_id > 0) {
+  // The 0th (primary) worker is notified by mutator threads and has a special monitor.
+  if (!is_primary()) {
     _monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true,
                            Monitor::_safepoint_check_never);
   } else {
@@ -66,61 +65,11 @@
 }
 
 void ConcurrentG1RefineThread::initialize() {
-  if (_worker_id < cg1r()->worker_thread_num()) {
-    // Current thread activation threshold
-    _threshold = MIN2<int>(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
-                           cg1r()->yellow_zone());
-    // A thread deactivates once the number of buffer reached a deactivation threshold
-    _deactivation_threshold = MAX2<int>(_threshold - cg1r()->thread_threshold_step(), cg1r()->green_zone());
-  } else {
-    set_active(true);
-  }
-}
-
-void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
-  SuspendibleThreadSetJoiner sts_join;
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
-  if (g1p->adaptive_young_list_length()) {
-    int regions_visited = 0;
-    g1h->young_list()->rs_length_sampling_init();
-    while (g1h->young_list()->rs_length_sampling_more()) {
-      g1h->young_list()->rs_length_sampling_next();
-      ++regions_visited;
-
-      // we try to yield every time we visit 10 regions
-      if (regions_visited == 10) {
-        if (sts_join.should_yield()) {
-          sts_join.yield();
-          // we just abandon the iteration
-          break;
-        }
-        regions_visited = 0;
-      }
-    }
-
-    g1p->revise_young_list_target_length_if_necessary();
-  }
-}
-
-void ConcurrentG1RefineThread::run_young_rs_sampling() {
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  _vtime_start = os::elapsedVTime();
-  while(!_should_terminate) {
-    sample_young_list_rs_lengths();
-
-    if (os::supports_vtime()) {
-      _vtime_accum = (os::elapsedVTime() - _vtime_start);
-    } else {
-      _vtime_accum = 0.0;
-    }
-
-    MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
-    if (_should_terminate) {
-      break;
-    }
-    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefinementServiceIntervalMillis);
-  }
+  // Current thread activation threshold
+  _threshold = MIN2<int>(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
+                         cg1r()->yellow_zone());
+  // A thread deactivates once the number of buffers reaches a deactivation threshold
+  _deactivation_threshold = MAX2<int>(_threshold - cg1r()->thread_threshold_step(), cg1r()->green_zone());
 }
 
 void ConcurrentG1RefineThread::wait_for_completed_buffers() {
@@ -133,12 +82,12 @@
 
 bool ConcurrentG1RefineThread::is_active() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  return _worker_id > 0 ? _active : dcqs.process_completed_buffers();
+  return is_primary() ? dcqs.process_completed_buffers() : _active;
 }
 
 void ConcurrentG1RefineThread::activate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
-  if (_worker_id > 0) {
+  if (!is_primary()) {
     if (G1TraceConcRefinement) {
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",
@@ -154,7 +103,7 @@
 
 void ConcurrentG1RefineThread::deactivate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
-  if (_worker_id > 0) {
+  if (!is_primary()) {
     if (G1TraceConcRefinement) {
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",
@@ -171,25 +120,24 @@
   initialize_in_thread();
   wait_for_universe_init();
 
-  if (_worker_id >= cg1r()->worker_thread_num()) {
-    run_young_rs_sampling();
-    terminate();
-    return;
-  }
+  run_service();
+
+  terminate();
+}
 
+void ConcurrentG1RefineThread::run_service() {
   _vtime_start = os::elapsedVTime();
+
   while (!_should_terminate) {
-    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-
     // Wait for work
     wait_for_completed_buffers();
-
     if (_should_terminate) {
       break;
     }
 
     {
       SuspendibleThreadSetJoiner sts_join;
+      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 
       do {
         int curr_buffer_num = (int)dcqs.completed_buffers_num();
@@ -199,7 +147,7 @@
           dcqs.set_completed_queue_padding(0);
         }
 
-        if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) {
+        if (!is_primary() && curr_buffer_num <= _deactivation_threshold) {
           // If the number of the buffer has fallen below our threshold
           // we should deactivate. The predecessor will reactivate this
           // thread should the number of the buffers cross the threshold again.
@@ -225,8 +173,10 @@
       _vtime_accum = 0.0;
     }
   }
-  assert(_should_terminate, "just checking");
-  terminate();
+
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-stop");
+  }
 }
 
 void ConcurrentG1RefineThread::stop() {
@@ -236,10 +186,7 @@
     _should_terminate = true;
   }
 
-  {
-    MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
-    _monitor->notify();
-  }
+  stop_service();
 
   {
     MutexLockerEx mu(Terminator_lock);
@@ -247,8 +194,9 @@
       Terminator_lock->wait();
     }
   }
-  if (G1TraceConcRefinement) {
-    gclog_or_tty->print_cr("G1-Refine-stop");
-  }
 }
 
+void ConcurrentG1RefineThread::stop_service() {
+  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  _monitor->notify();
+}
\ No newline at end of file
--- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -31,14 +31,14 @@
 class CardTableEntryClosure;
 class ConcurrentG1Refine;
 
-// The G1 Concurrent Refinement Thread (could be several in the future).
-
+// One or more G1 Concurrent Refinement Threads may be active if concurrent
+// refinement is in progress.
 class ConcurrentG1RefineThread: public ConcurrentGCThread {
   friend class VMStructs;
   friend class G1CollectedHeap;
 
   double _vtime_start;  // Initial virtual time.
-  double _vtime_accum;  // Initial virtual time.
+  double _vtime_accum;  // Accumulated virtual time.
   uint _worker_id;
   uint _worker_id_offset;
 
@@ -59,8 +59,6 @@
   // This thread deactivation threshold
   int _deactivation_threshold;
 
-  void sample_young_list_rs_lengths();
-  void run_young_rs_sampling();
   void wait_for_completed_buffers();
 
   void set_active(bool x) { _active = x; }
@@ -68,6 +66,11 @@
   void activate();
   void deactivate();
 
+  bool is_primary() { return (_worker_id == 0); }
+
+  void run_service();
+  void stop_service();
+
 public:
   virtual void run();
   // Constructor
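
The hunks above move the refinement loop into run_service() and the wake-up into stop_service(), so the concurrent GC threads share the same run()/stop() boilerplate. Below is a minimal, self-contained sketch of that template-method shape; it uses standard C++ threads rather than the HotSpot thread classes, and all names are illustrative, not the actual ConcurrentGCThread API.

#include <mutex>
#include <thread>

class ServiceThreadSketch {
 public:
  virtual ~ServiceThreadSketch() {}

  void start() { _thread = std::thread([this] { run(); }); }

  void stop() {                        // analogous to ConcurrentG1RefineThread::stop()
    {
      std::lock_guard<std::mutex> l(_lock);
      _should_terminate = true;
    }
    stop_service();                    // wake the service loop so it can observe the flag
    _thread.join();                    // stands in for the Terminator_lock handshake
  }

 protected:
  virtual void run_service() = 0;      // subclass-specific work loop
  virtual void stop_service() = 0;     // subclass-specific wake-up of its wait point

  bool should_terminate() {
    std::lock_guard<std::mutex> l(_lock);
    return _should_terminate;
  }

  std::mutex _lock;

 private:
  void run() { run_service(); }        // init/terminate bookkeeping omitted for brevity
  std::thread _thread;
  bool _should_terminate = false;
};
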
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -92,15 +92,31 @@
   }
 }
 
+// Marking pauses can be scheduled flexibly, so we might delay marking to meet the MMU goal.
+void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
+  if (g1_policy->adaptive_young_list_length()) {
+    double now = os::elapsedTime();
+    double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
+                                  : g1_policy->predict_cleanup_time_ms();
+    G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
+    jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
+    os::sleep(this, sleep_time_ms, false);
+  }
+}
 void ConcurrentMarkThread::run() {
   initialize_in_thread();
+  wait_for_universe_init();
+
+  run_service();
+
+  terminate();
+}
+
+void ConcurrentMarkThread::run_service() {
   _vtime_start = os::elapsedVTime();
-  wait_for_universe_init();
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1CollectorPolicy* g1_policy = g1h->g1_policy();
-  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
-  Thread *current_thread = Thread::current();
 
   while (!_should_terminate) {
     // wait until started is set.
@@ -141,12 +157,7 @@
         double mark_end_sec = os::elapsedTime();
         _vtime_mark_accum += (mark_end_time - cycle_start);
         if (!cm()->has_aborted()) {
-          if (g1_policy->adaptive_young_list_length()) {
-            double now = os::elapsedTime();
-            double remark_prediction_ms = g1_policy->predict_remark_time_ms();
-            jlong sleep_time_ms = mmu_tracker->when_ms(now, remark_prediction_ms);
-            os::sleep(current_thread, sleep_time_ms, false);
-          }
+          delay_to_keep_mmu(g1_policy, true /* remark */);
 
           cm_log(G1Log::fine(), true, "[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec);
 
@@ -167,12 +178,7 @@
       _vtime_accum = (end_time - _vtime_start);
 
       if (!cm()->has_aborted()) {
-        if (g1_policy->adaptive_young_list_length()) {
-          double now = os::elapsedTime();
-          double cleanup_prediction_ms = g1_policy->predict_cleanup_time_ms();
-          jlong sleep_time_ms = mmu_tracker->when_ms(now, cleanup_prediction_ms);
-          os::sleep(current_thread, sleep_time_ms, false);
-        }
+        delay_to_keep_mmu(g1_policy, false /* cleanup */);
 
         CMCleanUp cl_cl(_cm);
         VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
@@ -272,9 +278,6 @@
       g1h->register_concurrent_cycle_end();
     }
   }
-  assert(_should_terminate, "just checking");
-
-  terminate();
 }
 
 void ConcurrentMarkThread::stop() {
@@ -283,10 +286,7 @@
     _should_terminate = true;
   }
 
-  {
-    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
-    CGC_lock->notify_all();
-  }
+  stop_service();
 
   {
     MutexLockerEx ml(Terminator_lock);
@@ -296,6 +296,11 @@
   }
 }
 
+void ConcurrentMarkThread::stop_service() {
+  MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
+  CGC_lock->notify_all();
+}
+
 void ConcurrentMarkThread::sleepBeforeNextCycle() {
   // We join here because we don't want to do the "shouldConcurrentMark()"
   // below while the world is otherwise stopped.
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -27,11 +27,11 @@
 
 #include "gc/shared/concurrentGCThread.hpp"
 
-// The Concurrent Mark GC Thread (could be several in the future).
-// This is copied from the Concurrent Mark Sweep GC Thread
-// Still under construction.
+// The Concurrent Mark GC Thread triggers the parallel CMConcurrentMarkingTasks
+// and handles the various marking cleanup steps.
 
 class ConcurrentMark;
+class G1CollectorPolicy;
 
 class ConcurrentMarkThread: public ConcurrentGCThread {
   friend class VMStructs;
@@ -57,6 +57,10 @@
   volatile State _state;
 
   void sleepBeforeNextCycle();
+  void delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark);
+
+  void run_service();
+  void stop_service();
 
   static SurrogateLockerThread*         _slt;
 
@@ -67,9 +71,9 @@
   static void makeSurrogateLockerThread(TRAPS);
   static SurrogateLockerThread* slt() { return _slt; }
 
-  // Total virtual time so far.
+  // Total virtual time so far for this thread and concurrent marking tasks.
   double vtime_accum();
-  // Marking virtual time so far
+  // Marking virtual time so far for this thread and concurrent marking tasks.
   double vtime_mark_accum();
 
   ConcurrentMark* cm()     { return _cm; }
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1716,7 +1716,7 @@
   return NULL;
 }
 
-bool G1CollectedHeap::expand(size_t expand_bytes) {
+bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
@@ -1733,10 +1733,14 @@
     return false;
   }
 
+  double expand_heap_start_time_sec = os::elapsedTime();
   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
   assert(regions_to_expand > 0, "Must expand by at least one region");
 
   uint expanded_by = _hrm.expand_by(regions_to_expand);
+  if (expand_time_ms != NULL) {
+    *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
+  }
 
   if (expanded_by > 0) {
     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
@@ -3930,9 +3934,13 @@
         _allocator->init_gc_alloc_regions(evacuation_info);
 
         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), g1_policy()->young_cset_region_length());
+        pre_evacuate_collection_set();
+
         // Actually do the work...
         evacuate_collection_set(evacuation_info, &per_thread_states);
 
+        post_evacuate_collection_set(evacuation_info, &per_thread_states);
+
         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
         free_collection_set(g1_policy()->collection_set(), evacuation_info, surviving_young_words);
 
@@ -3997,9 +4005,11 @@
             size_t bytes_before = capacity();
             // No need for an ergo verbose message here,
             // expansion_amount() does this when it returns a value > 0.
-            if (!expand(expand_bytes)) {
+            double expand_ms;
+            if (!expand(expand_bytes, &expand_ms)) {
               // We failed to expand the heap. Cannot do anything about it.
             }
+            g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
           }
         }
 
@@ -4584,20 +4594,16 @@
     Klass* klass;
     do {
       klass =_klass_iterator.next_klass();
-    } while (klass != NULL && !klass->oop_is_instance());
-
-    return (InstanceKlass*)klass;
+    } while (klass != NULL && !klass->is_instance_klass());
+
+    // The klass can be NULL here, so do not call InstanceKlass::cast().
+    return static_cast<InstanceKlass*>(klass);
   }
 
 public:
 
   void clean_klass(InstanceKlass* ik) {
-    ik->clean_implementors_list(_is_alive);
-    ik->clean_method_data(_is_alive);
-
-    // G1 specific cleanup work that has
-    // been moved here to be done in parallel.
-    ik->clean_dependent_nmethods();
+    ik->clean_weak_instanceklass_links(_is_alive);
   }
 
   void work() {
@@ -5164,27 +5170,29 @@
   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
 }
 
-void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
+void G1CollectedHeap::pre_evacuate_collection_set() {
   _expand_heap_after_alloc_failure = true;
   _evacuation_failed = false;
 
-  // Should G1EvacuationFailureALot be in effect for this GC?
-  NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
-
-  g1_rem_set()->prepare_for_oops_into_collection_set_do();
-
   // Disable the hot card cache.
   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
   hot_card_cache->reset_hot_cache_claimed_index();
   hot_card_cache->set_use_cache(false);
 
-  const uint n_workers = workers()->active_workers();
+}
+
+void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
+  g1_rem_set()->prepare_for_oops_into_collection_set_do();
+
+  // Should G1EvacuationFailureALot be in effect for this GC?
+  NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
 
   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
   double start_par_time_sec = os::elapsedTime();
   double end_par_time_sec;
 
   {
+    const uint n_workers = workers()->active_workers();
     G1RootProcessor root_processor(this, n_workers);
     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
     // InitialMark needs claim bits to keep track of the marked-through CLDs.
@@ -5234,21 +5242,8 @@
     phase_times->record_string_dedup_fixup_time(fixup_time_ms);
   }
 
-  _allocator->release_gc_alloc_regions(evacuation_info);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
-  per_thread_states->flush();
-
-  record_obj_copy_mem_stats();
-
-  // Reset and re-enable the hot card cache.
-  // Note the counts for the cards in the regions in the
-  // collection set are reset when the collection set is freed.
-  hot_card_cache->reset_hot_cache();
-  hot_card_cache->set_use_cache(true);
-
-  purge_code_root_memory();
-
   if (evacuation_failed()) {
     remove_self_forwarding_pointers();
 
@@ -5266,6 +5261,23 @@
   // cards). We need these updates logged to update any
   // RSets.
   enqueue_discovered_references(per_thread_states);
+}
+
+void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
+  _allocator->release_gc_alloc_regions(evacuation_info);
+
+  per_thread_states->flush();
+
+  record_obj_copy_mem_stats();
+
+  // Reset and re-enable the hot card cache.
+  // Note the counts for the cards in the regions in the
+  // collection set are reset when the collection set is freed.
+  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+  hot_card_cache->reset_hot_cache();
+  hot_card_cache->set_use_cache(true);
+
+  purge_code_root_memory();
 
   redirty_logged_cards();
 #if defined(COMPILER2) || INCLUDE_JVMCI
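
The change above factors the setup and teardown out of evacuate_collection_set() into pre_evacuate_collection_set() and post_evacuate_collection_set(); the header change below also makes the middle step virtual. A minimal sketch of that pre/do/post split, with an illustrative class rather than G1CollectedHeap:

class EvacuationSketch {
 public:
  virtual ~EvacuationSketch() {}

  void do_pause() {
    pre_evacuate();     // fixed setup shared by all variants
    evacuate();         // the overridable middle step (virtual in the header below)
    post_evacuate();    // fixed teardown shared by all variants
  }

 protected:
  virtual void evacuate() { /* default copying work */ }

 private:
  void pre_evacuate()  { /* e.g. reset failure flags, disable the hot card cache */ }
  void post_evacuate() { /* e.g. release alloc regions, flush per-thread state */ }
};
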
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -538,7 +538,7 @@
   // Returns true if the heap was expanded by the requested amount;
   // false otherwise.
   // (Rounds up to a HeapRegion boundary.)
-  bool expand(size_t expand_bytes);
+  bool expand(size_t expand_bytes, double* expand_time_ms = NULL);
 
   // Returns the PLAB statistics for a given destination.
   inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
@@ -728,7 +728,10 @@
   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 
   // Actually do the work of evacuating the collection set.
-  void evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states);
+  virtual void evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states);
+
+  void pre_evacuate_collection_set();
+  void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 
   // Print the header for the per-thread termination statistics.
   static void print_termination_stats_hdr(outputStream* const st);
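
expand() now optionally reports how long the expansion took via an out-parameter, which the caller feeds into the phase times. A small self-contained sketch of that idiom; std::chrono stands in for os::elapsedTime(), and do_expand/record_expand_heap_time are hypothetical stand-ins, not HotSpot API.

#include <chrono>
#include <cstddef>

static bool do_expand(std::size_t /* bytes */) { return true; }   // hypothetical stub

static bool expand(std::size_t bytes, double* expand_time_ms = nullptr) {
  const auto start = std::chrono::steady_clock::now();
  const bool ok = do_expand(bytes);
  if (expand_time_ms != nullptr) {                                 // caller opted in to timing
    const std::chrono::duration<double, std::milli> elapsed =
        std::chrono::steady_clock::now() - start;
    *expand_time_ms = elapsed.count();
  }
  return ok;
}

// Caller side, mirroring the pause code above:
//   double expand_ms = 0.0;
//   if (!expand(bytes, &expand_ms)) { /* could not expand the heap */ }
//   record_expand_heap_time(expand_ms);   // hypothetical hook
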
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -431,7 +431,7 @@
   }
   _free_regions_at_end_of_collection = _g1->num_free_regions();
 
-  update_young_list_target_length();
+  update_young_list_max_and_target_length();
   // We may immediately start allocating regions and placing them on the
   // collection set list. Initialize the per-collection set info
   start_incremental_cset_building();
@@ -507,13 +507,24 @@
   return _young_gen_sizer->max_desired_young_length();
 }
 
+void G1CollectorPolicy::update_young_list_max_and_target_length() {
+  update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
+}
+
+void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
+  update_young_list_target_length(rs_lengths);
+  update_max_gc_locker_expansion();
+}
+
 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
-  if (rs_lengths == (size_t) -1) {
-    // if it's set to the default value (-1), we should predict it;
-    // otherwise, use the given value.
-    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
-  }
+  _young_list_target_length = bounded_young_list_target_length(rs_lengths);
+}
 
+void G1CollectorPolicy::update_young_list_target_length() {
+  update_young_list_target_length(get_new_prediction(_rs_lengths_seq));
+}
+
+uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths) const {
   // Calculate the absolute and desired min bounds.
 
   // This is how many young regions we already have (currently: the survivors).
@@ -544,7 +555,6 @@
                                                            base_min_length,
                                                            desired_min_length,
                                                            desired_max_length);
-      _rs_lengths_prediction = rs_lengths;
     } else {
       // Don't calculate anything and let the code below bound it to
       // the desired_min_length, i.e., do the next GC as soon as
@@ -569,9 +579,8 @@
   assert(young_list_target_length > recorded_survivor_regions(),
          "we should be able to allocate at least one eden region");
   assert(young_list_target_length >= absolute_min_length, "post-condition");
-  _young_list_target_length = young_list_target_length;
 
-  update_max_gc_locker_expansion();
+  return young_list_target_length;
 }
 
 uint
@@ -695,11 +704,21 @@
   if (rs_lengths > _rs_lengths_prediction) {
     // add 10% to avoid having to recalculate often
     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
-    update_young_list_target_length(rs_lengths_prediction);
+    update_rs_lengths_prediction(rs_lengths_prediction);
+
+    update_young_list_max_and_target_length(rs_lengths_prediction);
   }
 }
 
+void G1CollectorPolicy::update_rs_lengths_prediction() {
+  update_rs_lengths_prediction(get_new_prediction(_rs_lengths_seq));
+}
 
+void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
+  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
+    _rs_lengths_prediction = prediction;
+  }
+}
 
 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
@@ -801,7 +820,8 @@
   _free_regions_at_end_of_collection = _g1->num_free_regions();
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
-  update_young_list_target_length();
+  update_young_list_max_and_target_length();
+  update_rs_lengths_prediction();
   _collectionSetChooser->clear();
 }
 
@@ -879,6 +899,10 @@
   }
 }
 
+double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
+  return phase_times()->average_time_ms(phase);
+}
+
 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
   if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
     return false;
@@ -1049,16 +1073,16 @@
 
   if (update_stats) {
     double cost_per_card_ms = 0.0;
-    double cost_scan_hcc = phase_times()->average_time_ms(G1GCPhaseTimes::ScanHCC);
+    double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
     if (_pending_cards > 0) {
-      cost_per_card_ms = (phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
+      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
       _cost_per_card_ms_seq->add(cost_per_card_ms);
     }
     _cost_scan_hcc_seq->add(cost_scan_hcc);
 
     double cost_per_entry_ms = 0.0;
     if (cards_scanned > 10) {
-      cost_per_entry_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
+      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
       if (collector_state()->last_gc_was_young()) {
         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
       } else {
@@ -1100,7 +1124,7 @@
     double cost_per_byte_ms = 0.0;
 
     if (copied_bytes > 0) {
-      cost_per_byte_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
+      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
       if (collector_state()->in_marking_window()) {
         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
       } else {
@@ -1109,8 +1133,8 @@
     }
 
     double all_other_time_ms = pause_time_ms -
-      (phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) + phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) +
-          phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) + phase_times()->average_time_ms(G1GCPhaseTimes::Termination));
+      (average_time_ms(G1GCPhaseTimes::UpdateRS) + average_time_ms(G1GCPhaseTimes::ScanRS) +
+       average_time_ms(G1GCPhaseTimes::ObjCopy)  + average_time_ms(G1GCPhaseTimes::Termination));
 
     double young_other_time_ms = 0.0;
     if (young_cset_region_length() > 0) {
@@ -1147,12 +1171,13 @@
   collector_state()->set_in_marking_window(new_in_marking_window);
   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
   _free_regions_at_end_of_collection = _g1->num_free_regions();
-  update_young_list_target_length();
+  update_young_list_max_and_target_length();
+  update_rs_lengths_prediction();
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 
-  double scan_hcc_time_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanHCC);
+  double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
 
   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
     ergo_verbose2(ErgoTiming,
@@ -1167,7 +1192,7 @@
   } else {
     update_rs_time_goal_ms -= scan_hcc_time_ms;
   }
-  adjust_concurrent_refinement(phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
+  adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
                                phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                                update_rs_time_goal_ms);
 
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -27,6 +27,7 @@
 
 #include "gc/g1/collectionSetChooser.hpp"
 #include "gc/g1/g1CollectorState.hpp"
+#include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1InCSetState.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
 #include "gc/g1/g1Predictions.hpp"
@@ -39,7 +40,6 @@
 
 class HeapRegion;
 class CollectionSetChooser;
-class G1GCPhaseTimes;
 
 // TraceYoungGenTime collects data on _both_ young and mixed evacuation pauses
 // (the latter may contain non-young regions - i.e. regions that are
@@ -378,6 +378,9 @@
 
   double accum_yg_surv_rate_pred(int age) const;
 
+protected:
+  virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
+
 private:
   // Statistics kept per GC stoppage, pause or full.
   TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
@@ -465,12 +468,16 @@
   double _mark_remark_start_sec;
   double _mark_cleanup_start_sec;
 
+  void update_young_list_max_and_target_length();
+  void update_young_list_max_and_target_length(size_t rs_lengths);
+
   // Update the young list target length either by setting it to the
   // desired fixed value or by calculating it using G1's pause
   // prediction model. If no rs_lengths parameter is passed, predict
   // the RS lengths using the prediction model, otherwise use the
   // given rs_lengths as the prediction.
-  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);
+  void update_young_list_target_length();
+  void update_young_list_target_length(size_t rs_lengths);
 
   // Calculate and return the minimum desired young list target
   // length. This is the minimum desired young list length according
@@ -493,6 +500,11 @@
                                           uint desired_min_length,
                                           uint desired_max_length) const;
 
+  uint bounded_young_list_target_length(size_t rs_lengths) const;
+
+  void update_rs_lengths_prediction();
+  void update_rs_lengths_prediction(size_t prediction);
+
   // Calculate and return chunk size (in number of regions) for parallel
   // concurrent mark cleanup.
   uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const;
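
The policy change splits the old update_young_list_target_length(rs_lengths = -1) into a const helper that only computes the bounded target and separate updaters that commit the target and the remembered-set-length prediction. A sketch of that compute-then-commit shape, with a made-up prediction formula and bounds (illustrative only, not the G1 model):

#include <algorithm>
#include <cstddef>

class YoungListPolicySketch {
 public:
  void update_target_length(std::size_t rs_lengths) {
    _target_length = bounded_target_length(rs_lengths);          // commit the computed value
  }
  void update_rs_lengths_prediction(std::size_t prediction) {
    _rs_lengths_prediction = prediction;                          // committed separately now
  }

 private:
  // Pure computation: no side effects, so it can be const and reasoned about in isolation.
  std::size_t bounded_target_length(std::size_t rs_lengths) const {
    const std::size_t desired = _min_length + rs_lengths / 512;   // stand-in prediction
    return std::min(std::max(desired, _min_length), _max_length);
  }

  std::size_t _min_length = 5;
  std::size_t _max_length = 100;
  std::size_t _target_length = 0;
  std::size_t _rs_lengths_prediction = 0;
};
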
--- a/hotspot/src/share/vm/gc/g1/g1EvacStats.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1EvacStats.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -104,9 +104,12 @@
     // Latch the result
     _desired_net_plab_sz = plab_sz;
     if (PrintPLAB) {
-      gclog_or_tty->print_cr(" (plab_sz = " SIZE_FORMAT " desired_plab_sz = " SIZE_FORMAT ") ", cur_plab_sz, plab_sz);
+      gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " desired_plab_sz = " SIZE_FORMAT ") ", cur_plab_sz, plab_sz);
     }
   }
+  if (PrintPLAB) {
+    gclog_or_tty->cr();
+  }
   // Clear accumulators for next round.
   reset();
 }
--- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -28,6 +28,7 @@
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1Log.hpp"
 #include "gc/g1/g1StringDedup.hpp"
+#include "gc/g1/workerDataArray.inline.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/os.hpp"
 
@@ -86,165 +87,6 @@
   }
 };
 
-template <class T>
-class WorkerDataArray  : public CHeapObj<mtGC> {
-  friend class G1GCParPhasePrinter;
-  T*          _data;
-  uint        _length;
-  const char* _title;
-  bool        _print_sum;
-  int         _log_level;
-  uint        _indent_level;
-  bool        _enabled;
-
-  WorkerDataArray<size_t>* _thread_work_items;
-
-  NOT_PRODUCT(T uninitialized();)
-
-  // We are caching the sum and average to only have to calculate them once.
-  // This is not done in an MT-safe way. It is intended to allow single
-  // threaded code to call sum() and average() multiple times in any order
-  // without having to worry about the cost.
-  bool   _has_new_data;
-  T      _sum;
-  T      _min;
-  T      _max;
-  double _average;
-
- public:
-  WorkerDataArray(uint length, const char* title, bool print_sum, int log_level, uint indent_level) :
-    _title(title), _length(0), _print_sum(print_sum), _log_level(log_level), _indent_level(indent_level),
-    _has_new_data(true), _thread_work_items(NULL), _enabled(true) {
-    assert(length > 0, "Must have some workers to store data for");
-    _length = length;
-    _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
-  }
-
-  ~WorkerDataArray() {
-    FREE_C_HEAP_ARRAY(T, _data);
-  }
-
-  void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items) {
-    _thread_work_items = thread_work_items;
-  }
-
-  WorkerDataArray<size_t>* thread_work_items() { return _thread_work_items; }
-
-  void set(uint worker_i, T value) {
-    assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
-    assert(_data[worker_i] == WorkerDataArray<T>::uninitialized(), "Overwriting data for worker %d in %s", worker_i, _title);
-    _data[worker_i] = value;
-    _has_new_data = true;
-  }
-
-  void set_thread_work_item(uint worker_i, size_t value) {
-    assert(_thread_work_items != NULL, "No sub count");
-    _thread_work_items->set(worker_i, value);
-  }
-
-  T get(uint worker_i) {
-    assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
-    assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), "No data added for worker %d", worker_i);
-    return _data[worker_i];
-  }
-
-  void add(uint worker_i, T value) {
-    assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
-    assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), "No data to add to for worker %d", worker_i);
-    _data[worker_i] += value;
-    _has_new_data = true;
-  }
-
-  double average(uint active_threads){
-    calculate_totals(active_threads);
-    return _average;
-  }
-
-  T sum(uint active_threads) {
-    calculate_totals(active_threads);
-    return _sum;
-  }
-
-  T minimum(uint active_threads) {
-    calculate_totals(active_threads);
-    return _min;
-  }
-
-  T maximum(uint active_threads) {
-    calculate_totals(active_threads);
-    return _max;
-  }
-
-  void reset() PRODUCT_RETURN;
-  void verify(uint active_threads) PRODUCT_RETURN;
-
-  void set_enabled(bool enabled) { _enabled = enabled; }
-
-  int log_level() { return _log_level;  }
-
- private:
-
-  void calculate_totals(uint active_threads){
-    if (!_has_new_data) {
-      return;
-    }
-
-    _sum = (T)0;
-    _min = _data[0];
-    _max = _min;
-    assert(active_threads <= _length, "Wrong number of active threads");
-    for (uint i = 0; i < active_threads; ++i) {
-      T val = _data[i];
-      _sum += val;
-      _min = MIN2(_min, val);
-      _max = MAX2(_max, val);
-    }
-    _average = (double)_sum / (double)active_threads;
-    _has_new_data = false;
-  }
-};
-
-
-#ifndef PRODUCT
-
-template <>
-size_t WorkerDataArray<size_t>::uninitialized() {
-  return (size_t)-1;
-}
-
-template <>
-double WorkerDataArray<double>::uninitialized() {
-  return -1.0;
-}
-
-template <class T>
-void WorkerDataArray<T>::reset() {
-  for (uint i = 0; i < _length; i++) {
-    _data[i] = WorkerDataArray<T>::uninitialized();
-  }
-  if (_thread_work_items != NULL) {
-    _thread_work_items->reset();
-  }
-}
-
-template <class T>
-void WorkerDataArray<T>::verify(uint active_threads) {
-  if (!_enabled) {
-    return;
-  }
-
-  assert(active_threads <= _length, "Wrong number of active threads");
-  for (uint i = 0; i < active_threads; i++) {
-    assert(_data[i] != WorkerDataArray<T>::uninitialized(),
-           "Invalid data for worker %u in '%s'", i, _title);
-  }
-  if (_thread_work_items != NULL) {
-    _thread_work_items->verify(active_threads);
-  }
-}
-
-#endif
-
 G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
   _max_gc_threads(max_gc_threads)
 {
@@ -298,6 +140,7 @@
   assert(active_gc_threads > 0, "The number of threads must be > 0");
   assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
   _active_gc_threads = active_gc_threads;
+  _cur_expand_heap_time_ms = 0.0;
 
   for (int i = 0; i < GCParPhasesSentinel; i++) {
     _gc_par_phases[i]->reset();
@@ -363,6 +206,9 @@
     // current value of "other time"
     misc_time_ms += _cur_clear_ct_time_ms;
 
+    // Remove expand heap time from "other time"
+    misc_time_ms += _cur_expand_heap_time_ms;
+
     return misc_time_ms;
 }
 
@@ -536,6 +382,8 @@
     }
   }
   print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
+  print_stats(1, "Expand Heap After Collection", _cur_expand_heap_time_ms);
+
   double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
   print_stats(1, "Other", misc_time_ms);
   if (_cur_verify_before_time_ms > 0.0) {
--- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -92,6 +92,7 @@
   double _cur_string_dedup_fixup_time_ms;
 
   double _cur_clear_ct_time_ms;
+  double _cur_expand_heap_time_ms;
   double _cur_ref_proc_time_ms;
   double _cur_ref_enq_time_ms;
 
@@ -155,6 +156,10 @@
     _cur_clear_ct_time_ms = ms;
   }
 
+  void record_expand_heap_time(double ms) {
+    _cur_expand_heap_time_ms = ms;
+  }
+
   void record_par_time(double ms) {
     _cur_collection_par_time_ms = ms;
   }
@@ -252,6 +257,10 @@
     return _cur_clear_ct_time_ms;
   }
 
+  double cur_expand_heap_time_ms() {
+    return _cur_expand_heap_time_ms;
+  }
+
   double root_region_scan_wait_time_ms() {
     return _root_region_scan_wait_time_ms;
   }
--- a/hotspot/src/share/vm/gc/g1/g1MMUTracker.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1MMUTracker.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -29,11 +29,23 @@
 #include "memory/allocation.hpp"
 #include "utilities/debug.hpp"
 
-// Keeps track of the GC work and decides when it is OK to do GC work
+// Two major user controls over G1 behavior are the pause time goal (MaxGCPauseMillis)
+// and the time slice over which it applies (GCPauseIntervalMillis). Together these
+// define the Minimum Mutator Utilisation (MMU) goal.
+//
+// * Definitions *
+// Mutator Utilisation:
+// - for a given time slice duration "ts",
+// - mutator utilisation is the following fraction:
+//     non_gc_time / ts
+//
+// Minimum Mutator Utilisation (MMU):
+// - the worst mutator utilisation across all time slices.
+//
+// G1MMUTracker keeps track of the GC work and decides when it is OK to do GC work
 // and for how long so that the MMU invariants are maintained.
-
-/***** ALL TIMES ARE IN SECS!!!!!!! *****/
-
+//
+// ***** ALL TIMES ARE IN SECS!!!!!!! *****
 // this is the "interface"
 class G1MMUTracker: public CHeapObj<mtGC> {
 protected:
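
To make the definitions above concrete, here is a small worked example; the numbers are illustrative only and are not defaults taken from this change.

// With a time slice of 200 ms and at most 50 ms of GC inside any slice, the
// worst (minimum) mutator utilisation is (200 - 50) / 200 = 0.75, i.e. an MMU
// goal of 75%.
double mutator_utilisation(double time_slice_ms, double gc_time_in_slice_ms) {
  return (time_slice_ms - gc_time_in_slice_ms) / time_slice_ms;
}
// mutator_utilisation(200.0, 50.0) == 0.75
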
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
+#include "gc/g1/suspendibleThreadSet.hpp"
+#include "runtime/mutexLocker.hpp"
+
+void G1YoungRemSetSamplingThread::run() {
+  initialize_in_thread();
+  wait_for_universe_init();
+
+  run_service();
+
+  terminate();
+}
+
+void G1YoungRemSetSamplingThread::stop() {
+  // it is ok to take late safepoints here, if needed
+  {
+    MutexLockerEx mu(Terminator_lock);
+    _should_terminate = true;
+  }
+
+  stop_service();
+
+  {
+    MutexLockerEx mu(Terminator_lock);
+    while (!_has_terminated) {
+      Terminator_lock->wait();
+    }
+  }
+}
+
+G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() : ConcurrentGCThread() {
+  _monitor = new Monitor(Mutex::nonleaf,
+                         "G1YoungRemSetSamplingThread monitor",
+                         true,
+                         Monitor::_safepoint_check_never);
+
+  create_and_start();
+
+  set_name("G1 Young RemSet Sampling");
+}
+
+void G1YoungRemSetSamplingThread::sleep_before_next_cycle() {
+  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  if (!_should_terminate) {
+    intx waitms = G1ConcRefinementServiceIntervalMillis; // defaults to 300 ms; is this really the right interval to use here?
+    _monitor->wait(Mutex::_no_safepoint_check_flag, waitms);
+  }
+}
+
+void G1YoungRemSetSamplingThread::run_service() {
+  double vtime_start = os::elapsedVTime();
+
+  while (!_should_terminate) {
+    sample_young_list_rs_lengths();
+
+    if (os::supports_vtime()) {
+      _vtime_accum = (os::elapsedVTime() - vtime_start);
+    } else {
+      _vtime_accum = 0.0;
+    }
+
+    sleep_before_next_cycle();
+  }
+}
+
+void G1YoungRemSetSamplingThread::stop_service() {
+  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  _monitor->notify();
+}
+
+void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
+  SuspendibleThreadSetJoiner sts;
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+  if (g1p->adaptive_young_list_length()) {
+    int regions_visited = 0;
+    g1h->young_list()->rs_length_sampling_init();
+    while (g1h->young_list()->rs_length_sampling_more()) {
+      g1h->young_list()->rs_length_sampling_next();
+      ++regions_visited;
+
+      // we try to yield every time we visit 10 regions
+      if (regions_visited == 10) {
+        if (sts.should_yield()) {
+          sts.yield();
+          // we just abandon the iteration
+          break;
+        }
+        regions_visited = 0;
+      }
+    }
+
+    g1p->revise_young_list_target_length_if_necessary();
+  }
+}
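
sample_young_list_rs_lengths() above re-checks for a pending safepoint every 10 regions and abandons the walk if one is requested. Below is a self-contained sketch of that cooperative-yield loop; should_yield() and do_one_region() are stand-ins, not the HotSpot SuspendibleThreadSet API.

#include <atomic>

static std::atomic<bool> safepoint_requested(false);  // stand-in for sts.should_yield()

static bool should_yield() { return safepoint_requested.load(); }
static void do_one_region() { /* sample one region's remembered set length */ }

static void sample(int n_regions) {
  int regions_visited = 0;
  for (int i = 0; i < n_regions; i++) {
    do_one_region();
    if (++regions_visited == 10) {     // re-check every 10 regions
      if (should_yield()) {
        return;                        // abandon the iteration, as the real loop does
      }
      regions_visited = 0;
    }
  }
}
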
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1YOUNGREMSETSAMPLINGTHREAD_HPP
+#define SHARE_VM_GC_G1_G1YOUNGREMSETSAMPLINGTHREAD_HPP
+
+#include "gc/shared/concurrentGCThread.hpp"
+
+// The G1YoungRemSetSamplingThread is used to re-assess the validity of
+// the prediction for the remembered set lengths of the young generation.
+//
+// At the end of a GC, G1 determines the length of the young gen based on
+// how much time the next GC can take, and when the next GC may occur
+// according to the MMU.
+//
+// The assumption is that a significant part of the GC is spent on scanning
+// the remembered sets (among other components), so this thread constantly
+// reevaluates the prediction for the remembered set scanning costs, and
+// G1CollectorPolicy potentially resizes the young gen. This may trigger an
+// earlier GC or even grow the young gen to meet the pause time goal.
+class G1YoungRemSetSamplingThread: public ConcurrentGCThread {
+private:
+  Monitor* _monitor;
+
+  void sample_young_list_rs_lengths();
+
+  void run_service();
+  void stop_service();
+
+  void sleep_before_next_cycle();
+
+  double _vtime_accum;  // Accumulated virtual time.
+
+public:
+  G1YoungRemSetSamplingThread();
+  double vtime_accum() { return _vtime_accum; }
+
+  virtual void run();
+  void stop();
+};
+
+#endif /* SHARE_VM_GC_G1_G1YOUNGREMSETSAMPLINGTHREAD_HPP */
--- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -660,7 +660,7 @@
   void print_object(outputStream* out, oop obj) {
 #ifdef PRODUCT
     Klass* k = obj->klass();
-    const char* class_name = InstanceKlass::cast(k)->external_name();
+    const char* class_name = k->external_name();
     out->print_cr("class name %s", class_name);
 #else // PRODUCT
     obj->print_on(out);
--- a/hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -563,7 +563,7 @@
   assert(_n_fine_entries == _max_fine_entries, "Precondition");
   PerRegionTable* max = NULL;
   jint max_occ = 0;
-  PerRegionTable** max_prev;
+  PerRegionTable** max_prev = NULL;
   size_t max_ind;
 
   size_t i = _fine_eviction_start;
@@ -599,6 +599,7 @@
   }
 
   guarantee(max != NULL, "Since _n_fine_entries > 0");
+  guarantee(max_prev != NULL, "Since max != NULL.");
 
   // Set the corresponding coarse bit.
   size_t max_hrm_index = (size_t) max->hr()->hrm_index();
@@ -1138,7 +1139,7 @@
 
 void HeapRegionRemSet::print_recorded() {
   int cur_evnt = 0;
-  Event cur_evnt_kind;
+  Event cur_evnt_kind = Event_illegal;
   int cur_evnt_ind = 0;
   if (_n_recorded_events > 0) {
     cur_evnt_kind = _recorded_events[cur_evnt];
--- a/hotspot/src/share/vm/gc/g1/heapRegionRemSet.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/heapRegionRemSet.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -222,7 +222,7 @@
 
 public:
   enum Event {
-    Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd
+    Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd, Event_illegal
   };
 
 private:
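
The two heapRegionRemSet fixes above initialize locals that are only conditionally assigned (max_prev and cur_evnt_kind); Event_illegal exists purely as a defined sentinel for the latter. A minimal sketch of the pattern, with illustrative names:

enum Event { Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd, Event_illegal };

// Returns the first recorded event, or Event_illegal if nothing was recorded.
static Event first_recorded(const Event* events, int n_recorded) {
  Event kind = Event_illegal;   // defined value even when the branch is not taken
  if (n_recorded > 0) {
    kind = events[0];
  }
  return kind;
}
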
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/workerDataArray.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/workerDataArray.inline.hpp"
+
+#ifndef PRODUCT
+void WorkerDataArray_test() {
+  const uint length = 3;
+  const char* title = "Test array";
+  const bool print_sum = false;
+  const int log_level = 3;
+  const uint indent_level = 2;
+
+  WorkerDataArray<size_t> array(length, title, print_sum, log_level, indent_level);
+  assert(strncmp(array.title(), title, strlen(title)) == 0 , "Expected titles to match");
+  assert(array.should_print_sum() == print_sum, "Expected should_print_sum to match print_sum");
+  assert(array.log_level() == log_level, "Expected log levels to match");
+  assert(array.indentation() == indent_level, "Expected indentation to match");
+
+  const size_t expected[length] = {5, 3, 7};
+  for (uint i = 0; i < length; i++) {
+    array.set(i, expected[i]);
+  }
+  for (uint i = 0; i < length; i++) {
+    assert(array.get(i) == expected[i], "Expected elements to match");
+  }
+
+  assert(array.sum(length) == (5 + 3 + 7), "Expected sums to match");
+  assert(array.minimum(length) == 3, "Expected minimum to match");
+  assert(array.maximum(length) == 7, "Expected maximum to match");
+  assert(array.diff(length) == (7 - 3), "Expected diffs to match");
+  assert(array.average(length) == 5, "Expected averages to match");
+
+  for (uint i = 0; i < length; i++) {
+    array.add(i, 1);
+  }
+  for (uint i = 0; i < length; i++) {
+    assert(array.get(i) == expected[i] + 1, "Expected add to increment values");
+  }
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/workerDataArray.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+template <class T>
+class WorkerDataArray  : public CHeapObj<mtGC> {
+  friend class G1GCParPhasePrinter;
+  T*          _data;
+  uint        _length;
+  const char* _title;
+  bool        _print_sum;
+  int         _log_level;
+  uint        _indent_level;
+  bool        _enabled;
+
+  WorkerDataArray<size_t>* _thread_work_items;
+
+  NOT_PRODUCT(inline T uninitialized() const;)
+
+  void set_all(T value);
+
+ public:
+  WorkerDataArray(uint length,
+                  const char* title,
+                  bool print_sum,
+                  int log_level,
+                  uint indent_level);
+
+  ~WorkerDataArray();
+
+  void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items);
+  void set_thread_work_item(uint worker_i, size_t value);
+  WorkerDataArray<size_t>* thread_work_items() const {
+    return _thread_work_items;
+  }
+
+  void set(uint worker_i, T value);
+  T get(uint worker_i) const;
+
+  void add(uint worker_i, T value);
+
+  double average(uint active_threads) const;
+  T sum(uint active_threads) const;
+  T minimum(uint active_threads) const;
+  T maximum(uint active_threads) const;
+  T diff(uint active_threads) const;
+
+  uint indentation() const {
+    return _indent_level;
+  }
+
+  const char* title() const {
+    return _title;
+  }
+
+  bool should_print_sum() const {
+    return _print_sum;
+  }
+
+  int log_level() const {
+    return _log_level;
+  }
+
+  void clear();
+  void set_enabled(bool enabled) {
+    _enabled = enabled;
+  }
+
+  void reset() PRODUCT_RETURN;
+  void verify(uint active_threads) const PRODUCT_RETURN;
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/workerDataArray.inline.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "gc/g1/workerDataArray.hpp"
+#include "memory/allocation.inline.hpp"
+
+template <typename T>
+WorkerDataArray<T>::WorkerDataArray(uint length,
+                                    const char* title,
+                                    bool print_sum,
+                                    int log_level,
+                                    uint indent_level) :
+ _title(title),
+ _length(0),
+ _print_sum(print_sum),
+ _log_level(log_level),
+ _indent_level(indent_level),
+ _thread_work_items(NULL),
+ _enabled(true) {
+  assert(length > 0, "Must have some workers to store data for");
+  _length = length;
+  _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
+  reset();
+}
+
+template <typename T>
+void WorkerDataArray<T>::set(uint worker_i, T value) {
+  assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
+  assert(_data[worker_i] == uninitialized(), "Overwriting data for worker %d in %s", worker_i, _title);
+  _data[worker_i] = value;
+}
+
+template <typename T>
+T WorkerDataArray<T>::get(uint worker_i) const {
+  assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
+  assert(_data[worker_i] != uninitialized(), "No data added for worker %d", worker_i);
+  return _data[worker_i];
+}
+
+template <typename T>
+WorkerDataArray<T>::~WorkerDataArray() {
+  FREE_C_HEAP_ARRAY(T, _data);
+}
+
+template <typename T>
+void WorkerDataArray<T>::link_thread_work_items(WorkerDataArray<size_t>* thread_work_items) {
+  _thread_work_items = thread_work_items;
+}
+
+template <typename T>
+void WorkerDataArray<T>::set_thread_work_item(uint worker_i, size_t value) {
+  assert(_thread_work_items != NULL, "No sub count");
+  _thread_work_items->set(worker_i, value);
+}
+
+template <typename T>
+void WorkerDataArray<T>::add(uint worker_i, T value) {
+  assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
+  assert(_data[worker_i] != uninitialized(), "No data to add to for worker %d", worker_i);
+  _data[worker_i] += value;
+}
+
+template <typename T>
+double WorkerDataArray<T>::average(uint active_threads) const {
+  return sum(active_threads) / (double) active_threads;
+}
+
+template <typename T>
+T WorkerDataArray<T>::sum(uint active_threads) const {
+  T s = get(0);
+  for (uint i = 1; i < active_threads; ++i) {
+    s += get(i);
+  }
+  return s;
+}
+
+template <typename T>
+T WorkerDataArray<T>::minimum(uint active_threads) const {
+  T min = get(0);
+  for (uint i = 1; i < active_threads; ++i) {
+    min = MIN2(min, get(i));
+  }
+  return min;
+}
+
+template <typename T>
+T WorkerDataArray<T>::maximum(uint active_threads) const {
+  T max = get(0);
+  for (uint i = 1; i < active_threads; ++i) {
+    max = MAX2(max, get(i));
+  }
+  return max;
+}
+
+template <typename T>
+T WorkerDataArray<T>::diff(uint active_threads) const {
+  return maximum(active_threads) - minimum(active_threads);
+}
+
+template <typename T>
+void WorkerDataArray<T>::clear() {
+  set_all(0);
+}
+
+template <typename T>
+void WorkerDataArray<T>::set_all(T value) {
+  for (uint i = 0; i < _length; i++) {
+    _data[i] = value;
+  }
+}
+
+#ifndef PRODUCT
+template <typename T>
+void WorkerDataArray<T>::reset() {
+  set_all(uninitialized());
+  if (_thread_work_items != NULL) {
+    _thread_work_items->reset();
+  }
+}
+
+template <typename T>
+void WorkerDataArray<T>::verify(uint active_threads) const {
+  if (!_enabled) {
+    return;
+  }
+
+  assert(active_threads <= _length, "Wrong number of active threads");
+  for (uint i = 0; i < active_threads; i++) {
+    assert(_data[i] != uninitialized(),
+           "Invalid data for worker %u in '%s'", i, _title);
+  }
+  if (_thread_work_items != NULL) {
+    _thread_work_items->verify(active_threads);
+  }
+}
+
+template <>
+inline size_t WorkerDataArray<size_t>::uninitialized() const {
+  return (size_t)-1;
+}
+
+template <>
+inline double WorkerDataArray<double>::uninitialized() const {
+  return -1.0;
+}
+#endif
--- a/hotspot/src/share/vm/gc/parallel/psCompactionManager.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psCompactionManager.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -200,7 +200,7 @@
     // by calling follow_class_loader explicitly. For non-anonymous classes
     // the call to follow_class_loader is made when the class loader itself
     // is handled.
-    if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
+    if (klass->is_instance_klass() && InstanceKlass::cast(klass)->is_anonymous()) {
       cm->follow_class_loader(klass->class_loader_data());
     } else {
       cm->follow_klass(klass);
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -383,7 +383,7 @@
 
   size_t old_size = gch->old_gen()->capacity();
   size_t new_size_before = _virtual_space.committed_size();
-  size_t min_new_size = spec()->init_size();
+  size_t min_new_size = initial_size();
   size_t max_new_size = reserved().byte_size();
   assert(min_new_size <= new_size_before &&
          new_size_before <= max_new_size,
--- a/hotspot/src/share/vm/gc/serial/tenuredGeneration.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/serial/tenuredGeneration.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -66,13 +66,6 @@
   const char* name() const { return "tenured generation"; }
   const char* short_name() const { return "Tenured"; }
 
-  // Does a "full" (forced) collection invoked on this generation collect
-  // the young generation as well? Note that this is a hack to allow the
-  // collection of the young gen first if the flag is set.
-  virtual bool full_collects_young_generation() const {
-    return !ScavengeBeforeFullGC;
-  }
-
   size_t unsafe_max_alloc_nogc() const;
   size_t contiguous_available() const;
 
--- a/hotspot/src/share/vm/gc/shared/cardGeneration.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/cardGeneration.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -208,8 +208,7 @@
   const double min_tmp = used_after_gc / maximum_used_percentage;
   size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
   // Don't shrink less than the initial generation size
-  minimum_desired_capacity = MAX2(minimum_desired_capacity,
-                                  spec()->init_size());
+  minimum_desired_capacity = MAX2(minimum_desired_capacity, initial_size());
   assert(used_after_gc <= minimum_desired_capacity, "sanity check");
 
   if (PrintGC && Verbose) {
@@ -262,8 +261,7 @@
     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
     const double max_tmp = used_after_gc / minimum_used_percentage;
     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
-    maximum_desired_capacity = MAX2(maximum_desired_capacity,
-                                    spec()->init_size());
+    maximum_desired_capacity = MAX2(maximum_desired_capacity, initial_size());
     if (PrintGC && Verbose) {
       gclog_or_tty->print_cr("  "
                              "  maximum_free_percentage: %6.2f"
@@ -299,20 +297,20 @@
       }
       if (PrintGC && Verbose) {
         gclog_or_tty->print_cr("  "
-                      "  shrinking:"
-                      "  initSize: %.1fK"
-                      "  maximum_desired_capacity: %.1fK",
-                      spec()->init_size() / (double) K,
-                      maximum_desired_capacity / (double) K);
+                               "  shrinking:"
+                               "  initSize: %.1fK"
+                               "  maximum_desired_capacity: %.1fK",
+                               initial_size() / (double) K,
+                               maximum_desired_capacity / (double) K);
         gclog_or_tty->print_cr("  "
-                      "  shrink_bytes: %.1fK"
-                      "  current_shrink_factor: " SIZE_FORMAT
-                      "  new shrink factor: " SIZE_FORMAT
-                      "  _min_heap_delta_bytes: %.1fK",
-                      shrink_bytes / (double) K,
-                      current_shrink_factor,
-                      _shrink_factor,
-                      _min_heap_delta_bytes / (double) K);
+                               "  shrink_bytes: %.1fK"
+                               "  current_shrink_factor: " SIZE_FORMAT
+                               "  new shrink factor: " SIZE_FORMAT
+                               "  _min_heap_delta_bytes: %.1fK",
+                               shrink_bytes / (double) K,
+                               current_shrink_factor,
+                               _shrink_factor,
+                               _min_heap_delta_bytes / (double) K);
       }
     }
   }
--- a/hotspot/src/share/vm/gc/shared/concurrentGCThread.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/concurrentGCThread.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -66,6 +66,7 @@
 }
 
 void ConcurrentGCThread::terminate() {
+  assert(_should_terminate, "Should only be called on terminate request.");
   // Signal that it is terminated
   {
     MutexLockerEx mu(Terminator_lock,
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -460,10 +460,9 @@
 
     bool prepared_for_verification = false;
     bool collected_old = false;
-    bool old_collects_young = complete &&
-                              _old_gen->full_collects_young_generation();
-    if (!old_collects_young &&
-        _young_gen->should_collect(full, size, is_tlab)) {
+    bool old_collects_young = complete && !ScavengeBeforeFullGC;
+
+    if (!old_collects_young && _young_gen->should_collect(full, size, is_tlab)) {
       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
         prepare_for_verify();
         prepared_for_verification = true;
@@ -1107,10 +1106,6 @@
   _young_gen->prepare_for_compaction(&cp);
 }
 
-GCStats* GenCollectedHeap::gc_stats(Generation* gen) const {
-  return gen->gc_stats();
-}
-
 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
   if (!silent) {
     gclog_or_tty->print("%s", _old_gen->name());
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -126,8 +126,6 @@
 
   WorkGang* workers() const { return _workers; }
 
-  GCStats* gc_stats(Generation* generation) const;
-
   // Returns JNI_OK on success
   virtual jint initialize();
 
--- a/hotspot/src/share/vm/gc/shared/generation.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/generation.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -58,12 +58,12 @@
           (HeapWord*)_virtual_space.high_boundary());
 }
 
-GenerationSpec* Generation::spec() {
+size_t Generation::initial_size() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   if (gch->is_young_gen(this)) {
-    return gch->gen_policy()->young_gen_spec();
+    return gch->gen_policy()->young_gen_spec()->init_size();
   }
-  return gch->gen_policy()->old_gen_spec();
+  return gch->gen_policy()->old_gen_spec()->init_size();
 }
 
 size_t Generation::max_capacity() const {
--- a/hotspot/src/share/vm/gc/shared/generation.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/generation.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -141,14 +141,14 @@
   }
 
   virtual Generation::Name kind() { return Generation::Other; }
-  GenerationSpec* spec();
 
   // This properly belongs in the collector, but for now this
   // will do.
   virtual bool refs_discovery_is_atomic() const { return true;  }
   virtual bool refs_discovery_is_mt()     const { return false; }
 
-  // Space enquiries (results in bytes)
+  // Space inquiries (results in bytes)
+  size_t initial_size();
   virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                         // generation can currently hold.
   virtual size_t used() const = 0;      // The number of used bytes in the gen.
@@ -309,10 +309,6 @@
   // do nothing.
   virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}
 
-  // This generation will collect all younger generations
-  // during a full collection.
-  virtual bool full_collects_young_generation() const { return false; }
-
   // This generation does in-place marking, meaning that mark words
   // are mutated during the marking phase and presumably reinitialized
   // to a canonical value after the GC. This is currently used by the
@@ -403,7 +399,7 @@
   // that was most recently collected. This allows the generation to
   // decide what statistics are valid to collect. For example, the
   // generation can decide to gather the amount of promoted data if
-  // the collection of the younger generations has completed.
+  // the collection of the young generation has completed.
   GCStats* gc_stats() const { return _gc_stats; }
   virtual void update_gc_stats(Generation* current_generation, bool full) {}
 
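
For context on the Generation::spec() removal above: the call sites in these hunks only ever reached through the spec to read its initial size, so the patch exposes a direct initial_size() accessor instead. A minimal, self-contained sketch of the same refactor, using toy names rather than HotSpot's Generation/GenerationSpec classes:

    #include <cstddef>
    #include <cstdio>

    struct Spec {                       // stand-in for GenerationSpec
      std::size_t _init_size;
      std::size_t init_size() const { return _init_size; }
    };

    struct Gen {                        // stand-in for Generation
      Spec _spec;
      // Before: callers wrote gen.spec()->init_size() everywhere.
      const Spec* spec() const { return &_spec; }
      // After: the one detail callers actually need is exposed directly.
      std::size_t initial_size() const { return _spec.init_size(); }
    };

    int main() {
      Gen g{ Spec{ 64 * 1024 * 1024 } };
      std::printf("initial size: %zu bytes\n", g.initial_size());
    }
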
--- a/hotspot/src/share/vm/gc/shared/generationSpec.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/generationSpec.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -58,9 +58,7 @@
       // else registers with an existing CMSCollector
 
       ConcurrentMarkSweepGeneration* g = NULL;
-      g = new ConcurrentMarkSweepGeneration(rs,
-                 init_size(), remset, UseCMSAdaptiveFreeLists,
-                 (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
+      g = new ConcurrentMarkSweepGeneration(rs, init_size(), remset);
 
       g->initialize_performance_counters();
 
--- a/hotspot/src/share/vm/gc/shared/referenceProcessor.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/referenceProcessor.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -373,7 +373,8 @@
   // so that the References are not considered active.
   while (obj != next_d) {
     obj = next_d;
-    assert(obj->is_instanceRef(), "should be reference object");
+    assert(obj->is_instance(), "should be an instance object");
+    assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object");
     next_d = java_lang_ref_Reference::discovered(obj);
     if (TraceReferenceGC && PrintGCDetails) {
       gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -194,7 +194,7 @@
                                      bool is_bottom_frame);
 
   // Runtime support
-  static bool       is_not_reached(                       methodHandle method, int bci);
+  static bool       is_not_reached(const methodHandle& method, int bci);
   // Safepoint support
   static void       notice_safepoints()                         { ShouldNotReachHere(); } // stops the thread when reaching a safepoint
   static void       ignore_safepoints()                         { ShouldNotReachHere(); } // ignores safepoints
--- a/hotspot/src/share/vm/interpreter/bytecode.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecode.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -179,7 +179,7 @@
  protected:
   const methodHandle _method;                          // method containing the bytecode
 
-  Bytecode_member_ref(methodHandle method, int bci)  : Bytecode(method(), method()->bcp_from(bci)), _method(method) {}
+  Bytecode_member_ref(const methodHandle& method, int bci)  : Bytecode(method(), method()->bcp_from(bci)), _method(method) {}
 
   methodHandle method() const                    { return _method; }
   ConstantPool* constants() const              { return _method->constants(); }
@@ -201,10 +201,10 @@
 class Bytecode_invoke: public Bytecode_member_ref {
  protected:
   // Constructor that skips verification
-  Bytecode_invoke(methodHandle method, int bci, bool unused)  : Bytecode_member_ref(method, bci) {}
+  Bytecode_invoke(const methodHandle& method, int bci, bool unused)  : Bytecode_member_ref(method, bci) {}
 
  public:
-  Bytecode_invoke(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) { verify(); }
+  Bytecode_invoke(const methodHandle& method, int bci)  : Bytecode_member_ref(method, bci) { verify(); }
   void verify() const;
 
   // Attributes
@@ -232,10 +232,10 @@
 
  private:
   // Helper to skip verification.   Used is_valid() to check if the result is really an invoke
-  inline friend Bytecode_invoke Bytecode_invoke_check(methodHandle method, int bci);
+  inline friend Bytecode_invoke Bytecode_invoke_check(const methodHandle& method, int bci);
 };
 
-inline Bytecode_invoke Bytecode_invoke_check(methodHandle method, int bci) {
+inline Bytecode_invoke Bytecode_invoke_check(const methodHandle& method, int bci) {
   return Bytecode_invoke(method, bci, false);
 }
 
@@ -243,7 +243,7 @@
 // Abstraction for all field accesses (put/get field/static)
 class Bytecode_field: public Bytecode_member_ref {
  public:
-  Bytecode_field(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) { verify(); }
+  Bytecode_field(const methodHandle& method, int bci)  : Bytecode_member_ref(method, bci) { verify(); }
 
   // Testers
   bool is_getfield() const                       { return java_code() == Bytecodes::_getfield; }
@@ -316,7 +316,7 @@
   int raw_index() const;
 
  public:
-  Bytecode_loadconstant(methodHandle method, int bci): Bytecode(method(), method->bcp_from(bci)), _method(method) { verify(); }
+  Bytecode_loadconstant(const methodHandle& method, int bci): Bytecode(method(), method->bcp_from(bci)), _method(method) { verify(); }
 
   void verify() const {
     assert(_method.not_null(), "must supply method");
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -2148,11 +2148,8 @@
         if (!constants->tag_at(index).is_unresolved_klass()) {
           // Make sure klass is initialized and doesn't have a finalizer
           Klass* entry = constants->slot_at(index).get_klass();
-          assert(entry->is_klass(), "Should be resolved klass");
-          Klass* k_entry = (Klass*) entry;
-          assert(k_entry->oop_is_instance(), "Should be InstanceKlass");
-          InstanceKlass* ik = (InstanceKlass*) k_entry;
-          if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
+          InstanceKlass* ik = InstanceKlass::cast(entry);
+          if (ik->is_initialized() && ik->can_be_fastpath_allocated()) {
             size_t obj_size = ik->size_helper();
             oop result = NULL;
             // If the TLAB isn't pre-zeroed then we'll have to do it
@@ -2609,9 +2606,9 @@
                   - klass: {other class}
 
                   but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure
-                  because rcvr->klass()->oop_is_instance() == 0
+                  because rcvr->klass()->is_instance_klass() == 0
                   However it seems to have a vtable in the right location. Huh?
-
+                  Because vtables have the same offset for ArrayKlass and InstanceKlass.
               */
               callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
               // Profile virtual call.
--- a/hotspot/src/share/vm/interpreter/bytecodeStream.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeStream.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,7 @@
   bool            _is_raw;                       // false in 'cooked' BytecodeStream
 
   // Construction
-  BaseBytecodeStream(methodHandle method) : _method(method) {
+  BaseBytecodeStream(const methodHandle& method) : _method(method) {
     set_interval(0, _method->code_size());
     _is_raw = false;
   }
@@ -118,7 +118,7 @@
 class RawBytecodeStream: public BaseBytecodeStream {
  public:
   // Construction
-  RawBytecodeStream(methodHandle method) : BaseBytecodeStream(method) {
+  RawBytecodeStream(const methodHandle& method) : BaseBytecodeStream(method) {
     _is_raw = true;
   }
 
@@ -169,7 +169,7 @@
 
  public:
   // Construction
-  BytecodeStream(methodHandle method) : BaseBytecodeStream(method) { }
+  BytecodeStream(const methodHandle& method) : BaseBytecodeStream(method) { }
 
   // Iteration
   Bytecodes::Code next() {
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -84,7 +84,7 @@
 
   // This method is called while executing the raw bytecodes, so none of
   // the adjustments that BytecodeStream performs applies.
-  void trace(methodHandle method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st) {
+  void trace(const methodHandle& method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st) {
     ResourceMark rm;
     if (_current_method != method()) {
       // Note 1: This code will not work as expected with true MT/MP.
@@ -126,7 +126,7 @@
 
   // Used for Method*::print_codes().  The input bcp comes from
   // BytecodeStream, which will skip wide bytecodes.
-  void trace(methodHandle method, address bcp, outputStream* st) {
+  void trace(const methodHandle& method, address bcp, outputStream* st) {
     _current_method = method();
     ResourceMark rm;
     Bytecodes::Code code = Bytecodes::code_at(method(), bcp);
@@ -166,7 +166,7 @@
 }
 
 
-void BytecodeTracer::trace(methodHandle method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st) {
+void BytecodeTracer::trace(const methodHandle& method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st) {
   if (TraceBytecodes && BytecodeCounter::counter_value() >= TraceBytecodesAt) {
     ttyLocker ttyl;  // 5065316: keep the following output coherent
     // The ttyLocker also prevents races between two threads
@@ -185,7 +185,7 @@
   }
 }
 
-void BytecodeTracer::trace(methodHandle method, address bcp, outputStream* st) {
+void BytecodeTracer::trace(const methodHandle& method, address bcp, outputStream* st) {
   ttyLocker ttyl;  // 5065316: keep the following output coherent
   _closure->trace(method, bcp, st);
 }
@@ -390,7 +390,6 @@
 }
 
 
-PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL
 void BytecodePrinter::print_attributes(int bci, outputStream* st) {
   // Show attributes of pre-rewritten codes
   Bytecodes::Code code = Bytecodes::java_code(raw_code());
@@ -512,15 +511,11 @@
         }
         st->print(" %d " INT32_FORMAT " " INT32_FORMAT " ",
                       default_dest, lo, hi);
-        int first = true;
-        for (int ll = lo; ll <= hi; ll++, first = false)  {
+        const char *comma = "";
+        for (int ll = lo; ll <= hi; ll++) {
           int idx = ll - lo;
-          const char *format = first ? " %d:" INT32_FORMAT " (delta: %d)" :
-                                       ", %d:" INT32_FORMAT " (delta: %d)";
-PRAGMA_DIAG_PUSH
-PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
-          st->print(format, ll, dest[idx], dest[idx]-bci);
-PRAGMA_DIAG_POP
+          st->print("%s %d:" INT32_FORMAT " (delta: %d)", comma, ll, dest[idx], dest[idx]-bci);
+          comma = ",";
         }
         st->cr();
       }
@@ -536,14 +531,10 @@
           dest[i] = bci + get_int();
         };
         st->print(" %d %d ", default_dest, len);
-        bool first = true;
-        for (int ll = 0; ll < len; ll++, first = false)  {
-          const char *format = first ? " " INT32_FORMAT ":" INT32_FORMAT :
-                                       ", " INT32_FORMAT ":" INT32_FORMAT ;
-PRAGMA_DIAG_PUSH
-PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
-          st->print(format, key[ll], dest[ll]);
-PRAGMA_DIAG_POP
+        const char *comma = "";
+        for (int ll = 0; ll < len; ll++)  {
+          st->print("%s " INT32_FORMAT ":" INT32_FORMAT, comma, key[ll], dest[ll]);
+          comma = ",";
         }
         st->cr();
       }
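
The two bytecodeTracer.cpp hunks above replace runtime-selected format strings (and the PRAGMA_FORMAT_NONLITERAL pragmas that silenced the compiler warning for them) with a separator-prefix idiom: the prefix starts empty and becomes "," after the first element, so a single literal format string suffices. A minimal sketch of that idiom with plain printf rather than HotSpot's outputStream:

    #include <cstdio>

    int main() {
      const int dest[] = { 10, 14, 23 };
      const char* comma = "";           // empty before the first element
      for (int i = 0; i < 3; i++) {
        std::printf("%s %d:%d (delta: %d)", comma, i, dest[i], dest[i] - i);
        comma = ",";                    // later elements get a "," prefix
      }
      std::printf("\n");
    }
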
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,8 +46,8 @@
   static BytecodeClosure* closure()                                                   { return _closure; }
   static void             set_closure(BytecodeClosure* closure) { _closure = closure; }
 
-  static void             trace(methodHandle method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st = tty);
-  static void             trace(methodHandle method, address bcp, outputStream* st = tty);
+  static void             trace(const methodHandle& method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st = tty);
+  static void             trace(const methodHandle& method, address bcp, outputStream* st = tty);
 };
 
 
@@ -55,8 +55,8 @@
 
 class BytecodeClosure {
  public:
-  virtual void trace(methodHandle method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st) = 0;
-  virtual void trace(methodHandle method, address bcp, outputStream* st) = 0;
+  virtual void trace(const methodHandle& method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st) = 0;
+  virtual void trace(const methodHandle& method, address bcp, outputStream* st) = 0;
 };
 
 #endif // SHARE_VM_INTERPRETER_BYTECODETRACER_HPP
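
Most of the interpreter hunks above only change parameter types from methodHandle to const methodHandle&. Handle-style wrappers are cheap to hold but copying one can involve per-thread bookkeeping, so taking the parameter by value pays for a copy on every call while a const reference does not, and the callees here never need their own copy. A self-contained sketch of the difference with a toy handle type (not HotSpot's methodHandle):

    #include <cstdio>

    struct ToyHandle {                                  // stand-in for a handle wrapper
      const void* _obj;
      explicit ToyHandle(const void* obj) : _obj(obj) {}
      ToyHandle(const ToyHandle& other) : _obj(other._obj) {
        std::puts("copy: per-thread bookkeeping here"); // copies are not free
      }
    };

    static void by_value(ToyHandle h)            { (void)h; } // copies on every call
    static void by_const_ref(const ToyHandle& h) { (void)h; } // no copy at all

    int main() {
      ToyHandle h(nullptr);
      by_value(h);        // prints the copy message
      by_const_ref(h);    // prints nothing
    }
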
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -320,7 +320,7 @@
 
 // Return true if the interpreter can prove that the given bytecode has
 // not yet been executed (in Java semantics, not in actual operation).
-bool AbstractInterpreter::is_not_reached(methodHandle method, int bci) {
+bool AbstractInterpreter::is_not_reached(const methodHandle& method, int bci) {
   Bytecodes::Code code = method()->code_at(bci);
 
   if (!Bytecodes::must_rewrite(code)) {
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1192,7 +1192,7 @@
   return handler;
 }
 
-void SignatureHandlerLibrary::add(methodHandle method) {
+void SignatureHandlerLibrary::add(const methodHandle& method) {
   if (method->signature_handler() == NULL) {
     // use slow signature handler if we can't do better
     int handler_index = -1;
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -218,7 +218,7 @@
   static void pd_set_handler(address handler);
 
  public:
-  static void add(methodHandle method);
+  static void add(const methodHandle& method);
   static void add(uint64_t fingerprint, address handler);
 };
 
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -237,7 +237,7 @@
 //------------------------------------------------------------------------------------------------------------------------
 // Implementation of LinkInfo
 
-LinkInfo::LinkInfo(constantPoolHandle pool, int index, TRAPS) {
+LinkInfo::LinkInfo(const constantPoolHandle& pool, int index, TRAPS) {
    // resolve klass
   Klass* result = pool->klass_ref_at(index, CHECK);
   _resolved_klass = KlassHandle(THREAD, result);
@@ -302,17 +302,19 @@
   // Ignore overpasses so statics can be found during resolution
   Method* result = klass->uncached_lookup_method(name, signature, Klass::skip_overpass);
 
-  if (klass->oop_is_array()) {
+  if (klass->is_array_klass()) {
     // Only consider klass and super klass for arrays
     return methodHandle(THREAD, result);
   }
 
+  InstanceKlass* ik = InstanceKlass::cast(klass());
+
   // JDK 8, JVMS 5.4.3.4: Interface method resolution should
   // ignore static and non-public methods of java.lang.Object,
   // like clone, finalize, registerNatives.
   if (in_imethod_resolve &&
       result != NULL &&
-      klass->is_interface() &&
+      ik->is_interface() &&
       (result->is_static() || !result->is_public()) &&
       result->method_holder() == SystemDictionary::Object_klass()) {
     result = NULL;
@@ -321,11 +323,11 @@
   // Before considering default methods, check for an overpass in the
   // current class if a method has not been found.
   if (result == NULL) {
-    result = InstanceKlass::cast(klass())->find_method(name, signature);
+    result = ik->find_method(name, signature);
   }
 
   if (result == NULL) {
-    Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
+    Array<Method*>* default_methods = ik->default_methods();
     if (default_methods != NULL) {
       result = InstanceKlass::find_method(default_methods, name, signature);
     }
@@ -353,7 +355,7 @@
     result = super_klass->uncached_lookup_method(name, signature, Klass::find_overpass);
   }
 
-  if (klass->oop_is_array()) {
+  if (klass->is_array_klass()) {
     // Only consider klass and super klass for arrays
     return methodHandle(THREAD, result);
   }
@@ -374,21 +376,21 @@
   int vtable_index = Method::invalid_vtable_index;
   Symbol* name = resolved_method->name();
   Symbol* signature = resolved_method->signature();
+  InstanceKlass* ik = InstanceKlass::cast(klass());
 
   // First check in default method array
-  if (!resolved_method->is_abstract() &&
-    (InstanceKlass::cast(klass())->default_methods() != NULL)) {
-    int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(),
+  if (!resolved_method->is_abstract() && ik->default_methods() != NULL) {
+    int index = InstanceKlass::find_method_index(ik->default_methods(),
                                                  name, signature, Klass::find_overpass,
                                                  Klass::find_static, Klass::find_private);
     if (index >= 0 ) {
-      vtable_index = InstanceKlass::cast(klass())->default_vtable_indices()->at(index);
+      vtable_index = ik->default_vtable_indices()->at(index);
     }
   }
   if (vtable_index == Method::invalid_vtable_index) {
     // get vtable_index for miranda methods
     ResourceMark rm;
-    klassVtable *vt = InstanceKlass::cast(klass())->vtable();
+    klassVtable *vt = ik->vtable();
     vtable_index = vt->index_of_miranda(name, signature);
   }
   return vtable_index;
@@ -529,7 +531,7 @@
   // to be false (so we'll short-circuit out of these tests).
   if (sel_method->name() == vmSymbols::clone_name() &&
       sel_klass() == SystemDictionary::Object_klass() &&
-      resolved_klass->oop_is_array()) {
+      resolved_klass->is_array_klass()) {
     // We need to change "protected" to "public".
     assert(flags.is_protected(), "clone not protected?");
     jint new_flags = flags.as_int();
@@ -559,7 +561,7 @@
 }
 
 methodHandle LinkResolver::resolve_method_statically(Bytecodes::Code code,
-                                                     constantPoolHandle pool, int index, TRAPS) {
+                                                     const constantPoolHandle& pool, int index, TRAPS) {
   // This method is used only
   // (1) in C2 from InlineTree::ok_to_inline (via ciMethod::check_call),
   // and
@@ -682,7 +684,7 @@
   // 2. lookup method in resolved klass and its super klasses
   methodHandle resolved_method = lookup_method_in_klasses(link_info, true, false, CHECK_NULL);
 
-  if (resolved_method.is_null() && !resolved_klass->oop_is_array()) { // not found in the class hierarchy
+  if (resolved_method.is_null() && !resolved_klass->is_array_klass()) { // not found in the class hierarchy
     // 3. lookup method in all the interfaces implemented by the resolved klass
     resolved_method = lookup_method_in_interfaces(link_info, CHECK_NULL);
 
@@ -742,7 +744,7 @@
   // JDK8: also look for static methods
   methodHandle resolved_method = lookup_method_in_klasses(link_info, false, true, CHECK_NULL);
 
-  if (resolved_method.is_null() && !resolved_klass->oop_is_array()) {
+  if (resolved_method.is_null() && !resolved_klass->is_array_klass()) {
     // lookup method in all the super-interfaces
     resolved_method = lookup_method_in_interfaces(link_info, CHECK_NULL);
   }
@@ -816,7 +818,7 @@
   }
 }
 
-void LinkResolver::resolve_field_access(fieldDescriptor& fd, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS) {
+void LinkResolver::resolve_field_access(fieldDescriptor& fd, const constantPoolHandle& pool, int index, Bytecodes::Code byte, TRAPS) {
   LinkInfo link_info(pool, index, CHECK);
   resolve_field(fd, link_info, byte, true, CHECK);
 }
@@ -1442,7 +1444,7 @@
 //------------------------------------------------------------------------------------------------------------------------
 // ConstantPool entries
 
-void LinkResolver::resolve_invoke(CallInfo& result, Handle recv, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS) {
+void LinkResolver::resolve_invoke(CallInfo& result, Handle recv, const constantPoolHandle& pool, int index, Bytecodes::Code byte, TRAPS) {
   switch (byte) {
     case Bytecodes::_invokestatic   : resolve_invokestatic   (result,       pool, index, CHECK); break;
     case Bytecodes::_invokespecial  : resolve_invokespecial  (result,       pool, index, CHECK); break;
@@ -1454,20 +1456,20 @@
   return;
 }
 
-void LinkResolver::resolve_invokestatic(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {
+void LinkResolver::resolve_invokestatic(CallInfo& result, const constantPoolHandle& pool, int index, TRAPS) {
   LinkInfo link_info(pool, index, CHECK);
   resolve_static_call(result, link_info, /*initialize_class*/true, CHECK);
 }
 
 
-void LinkResolver::resolve_invokespecial(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {
+void LinkResolver::resolve_invokespecial(CallInfo& result, const constantPoolHandle& pool, int index, TRAPS) {
   LinkInfo link_info(pool, index, CHECK);
   resolve_special_call(result, link_info, CHECK);
 }
 
 
 void LinkResolver::resolve_invokevirtual(CallInfo& result, Handle recv,
-                                          constantPoolHandle pool, int index,
+                                          const constantPoolHandle& pool, int index,
                                           TRAPS) {
 
   LinkInfo link_info(pool, index, CHECK);
@@ -1476,14 +1478,14 @@
 }
 
 
-void LinkResolver::resolve_invokeinterface(CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS) {
+void LinkResolver::resolve_invokeinterface(CallInfo& result, Handle recv, const constantPoolHandle& pool, int index, TRAPS) {
   LinkInfo link_info(pool, index, CHECK);
   KlassHandle recvrKlass (THREAD, recv.is_null() ? (Klass*)NULL : recv->klass());
   resolve_interface_call(result, recv, recvrKlass, link_info, true, CHECK);
 }
 
 
-void LinkResolver::resolve_invokehandle(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {
+void LinkResolver::resolve_invokehandle(CallInfo& result, const constantPoolHandle& pool, int index, TRAPS) {
   // This guy is reached from InterpreterRuntime::resolve_invokehandle.
   LinkInfo link_info(pool, index, CHECK);
   if (TraceMethodHandles) {
@@ -1528,7 +1530,7 @@
   }
 }
 
-void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {
+void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHandle& pool, int index, TRAPS) {
   Symbol* method_name       = pool->name_ref_at(index);
   Symbol* method_signature  = pool->signature_ref_at(index);
   KlassHandle current_klass = KlassHandle(THREAD, pool->pool_holder());
--- a/hotspot/src/share/vm/interpreter/linkResolver.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/linkResolver.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -136,7 +136,7 @@
   KlassHandle _current_klass;   // class that owns the constant pool
   bool        _check_access;
  public:
-  LinkInfo(constantPoolHandle pool, int index, TRAPS);
+  LinkInfo(const constantPoolHandle& pool, int index, TRAPS);
   // Condensed information from other call sites within the vm.
   LinkInfo(KlassHandle resolved_klass, Symbol* name, Symbol* signature,
            KlassHandle current_klass, bool check_access = true) :
@@ -226,17 +226,17 @@
 
   // runtime resolving from constant pool
   static void resolve_invokestatic   (CallInfo& result,
-                                      constantPoolHandle pool, int index, TRAPS);
+                                      const constantPoolHandle& pool, int index, TRAPS);
   static void resolve_invokespecial  (CallInfo& result,
-                                      constantPoolHandle pool, int index, TRAPS);
+                                      const constantPoolHandle& pool, int index, TRAPS);
   static void resolve_invokevirtual  (CallInfo& result, Handle recv,
-                                      constantPoolHandle pool, int index, TRAPS);
+                                      const constantPoolHandle& pool, int index, TRAPS);
   static void resolve_invokeinterface(CallInfo& result, Handle recv,
-                                      constantPoolHandle pool, int index, TRAPS);
+                                      const constantPoolHandle& pool, int index, TRAPS);
   static void resolve_invokedynamic  (CallInfo& result,
-                                      constantPoolHandle pool, int index, TRAPS);
+                                      const constantPoolHandle& pool, int index, TRAPS);
   static void resolve_invokehandle   (CallInfo& result,
-                                      constantPoolHandle pool, int index, TRAPS);
+                                      const constantPoolHandle& pool, int index, TRAPS);
  public:
   // constant pool resolving
   static void check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS);
@@ -244,11 +244,11 @@
   // static resolving calls (will not run any Java code);
   // used only from Bytecode_invoke::static_target
   static methodHandle resolve_method_statically(Bytecodes::Code code,
-                                                constantPoolHandle pool,
+                                                const constantPoolHandle& pool,
                                                 int index, TRAPS);
 
   static void resolve_field_access(fieldDescriptor& result,
-                                   constantPoolHandle pool,
+                                   const constantPoolHandle& pool,
                                    int index, Bytecodes::Code byte, TRAPS);
   static void resolve_field(fieldDescriptor& result, const LinkInfo& link_info,
                             Bytecodes::Code access_kind,
@@ -293,7 +293,7 @@
 
   // runtime resolving from constant pool
   static void resolve_invoke(CallInfo& result, Handle recv,
-                             constantPoolHandle pool, int index,
+                             const constantPoolHandle& pool, int index,
                              Bytecodes::Code byte, TRAPS);
  private:
   static void trace_method_resolution(const char* prefix, KlassHandle klass,
--- a/hotspot/src/share/vm/interpreter/oopMapCache.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/oopMapCache.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -426,7 +426,7 @@
   }
 }
 
-inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) const {
+inline unsigned int OopMapCache::hash_value_for(const methodHandle& method, int bci) const {
   // We use method->code_size() rather than method->identity_hash() below since
   // the mark may not be present if a pointer to the method is already reversed.
   return   ((unsigned int) bci)
@@ -477,7 +477,7 @@
     }
 }
 
-void OopMapCache::lookup(methodHandle method,
+void OopMapCache::lookup(const methodHandle& method,
                          int bci,
                          InterpreterOopMap* entry_for) const {
   MutexLocker x(&_mut);
@@ -558,7 +558,7 @@
   return;
 }
 
-void OopMapCache::compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry) {
+void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) {
   // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
   OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1, mtClass);
   tmp->initialize();
--- a/hotspot/src/share/vm/interpreter/oopMapCache.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/oopMapCache.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -116,7 +116,7 @@
   void set_expression_stack_size(int sz)         { _expression_stack_size = sz; }
 
   // Lookup
-  bool match(methodHandle method, int bci) const { return _method == method() && _bci == bci; }
+  bool match(const methodHandle& method, int bci) const { return _method == method() && _bci == bci; }
   bool is_empty() const;
 
   // Initialization
@@ -151,7 +151,7 @@
 
   OopMapCacheEntry* _array;
 
-  unsigned int hash_value_for(methodHandle method, int bci) const;
+  unsigned int hash_value_for(const methodHandle& method, int bci) const;
   OopMapCacheEntry* entry_at(int i) const;
 
   mutable Mutex _mut;
@@ -167,10 +167,10 @@
 
   // Returns the oopMap for (method, bci) in parameter "entry".
   // Returns false if an oop map was not found.
-  void lookup(methodHandle method, int bci, InterpreterOopMap* entry) const;
+  void lookup(const methodHandle& method, int bci, InterpreterOopMap* entry) const;
 
   // Compute an oop map without updating the cache or grabbing any locks (for debugging)
-  static void compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry);
+  static void compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry);
 
   // Returns total no. of bytes allocated as part of OopMapCache's
   static long memory_usage()                     PRODUCT_RETURN0;
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -509,7 +509,7 @@
   // (That's all, folks.)
 }
 
-Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
+Rewriter::Rewriter(instanceKlassHandle klass, const constantPoolHandle& cpool, Array<Method*>* methods, TRAPS)
   : _klass(klass),
     _pool(cpool),
     _methods(methods)
--- a/hotspot/src/share/vm/interpreter/rewriter.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/rewriter.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -185,7 +185,7 @@
   }
 
   // All the work goes in here:
-  Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS);
+  Rewriter(instanceKlassHandle klass, const constantPoolHandle& cpool, Array<Method*>* methods, TRAPS);
 
   void compute_index_maps();
   void make_constant_pool_cache(TRAPS);
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -562,7 +562,7 @@
   if (StopInterpreterAt > 0)                                     stop_interpreter_at();
   __ verify_FPU(1, t->tos_in());
 #endif // !PRODUCT
-  int step;
+  int step = 0;
   if (!t->does_dispatch()) {
     step = t->is_wide() ? Bytecodes::wide_length_for(t->bytecode()) : Bytecodes::length_for(t->bytecode());
     if (tos_out == ilgl) tos_out = t->tos_out();
--- a/hotspot/src/share/vm/jvmci/jvmciCompiler.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciCompiler.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,7 +72,7 @@
   }
   jlong start = os::javaTimeMillis();
 
-  Array<Method*>* objectMethods = InstanceKlass::cast(SystemDictionary::Object_klass())->methods();
+  Array<Method*>* objectMethods = SystemDictionary::Object_klass()->methods();
   // Initialize compile queue with a selected set of methods.
   int len = objectMethods->length();
   for (int i = 0; i < len; i++) {
--- a/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -60,7 +60,7 @@
 
 #define C2V_END }
 
-oop CompilerToVM::get_jvmci_method(methodHandle method, TRAPS) {
+oop CompilerToVM::get_jvmci_method(const methodHandle& method, TRAPS) {
   if (method() != NULL) {
     JavaValue result(T_OBJECT);
     JavaCallArguments args;
@@ -549,7 +549,7 @@
   Klass* caller_klass = CompilerToVM::asKlass(caller_jvmci_type);
   Method* method = CompilerToVM::asMethod(jvmci_method);
 
-  if (recv_klass->oop_is_array() || (InstanceKlass::cast(recv_klass)->is_linked())) {
+  if (recv_klass->is_array_klass() || (InstanceKlass::cast(recv_klass)->is_linked())) {
     Klass* holder_klass = method->method_holder();
     Symbol* method_name = method->name();
     Symbol* method_signature = method->signature();
--- a/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,7 +94,7 @@
     return (MethodData*) (address) metaspaceMethodData;
   }
 
-  static oop get_jvmci_method(methodHandle method, TRAPS);
+  static oop get_jvmci_method(const methodHandle& method, TRAPS);
 
   static oop get_jvmci_type(KlassHandle klass, TRAPS);
 };
--- a/hotspot/src/share/vm/jvmci/jvmciEnv.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciEnv.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,18 +64,18 @@
 // Note: the logic of this method should mirror the logic of
 // constantPoolOopDesc::verify_constant_pool_resolve.
 bool JVMCIEnv::check_klass_accessibility(KlassHandle accessing_klass, KlassHandle resolved_klass) {
-  if (accessing_klass->oop_is_objArray()) {
+  if (accessing_klass->is_objArray_klass()) {
     accessing_klass = ObjArrayKlass::cast(accessing_klass())->bottom_klass();
   }
-  if (!accessing_klass->oop_is_instance()) {
+  if (!accessing_klass->is_instance_klass()) {
     return true;
   }
 
-  if (resolved_klass->oop_is_objArray()) {
+  if (resolved_klass->is_objArray_klass()) {
     // Find the element klass, if this is an array.
     resolved_klass = ObjArrayKlass::cast(resolved_klass())->bottom_klass();
   }
-  if (resolved_klass->oop_is_instance()) {
+  if (resolved_klass->is_instance_klass()) {
     return Reflection::verify_class_access(accessing_klass(), resolved_klass(), true);
   }
   return true;
@@ -83,7 +83,7 @@
 
 // ------------------------------------------------------------------
 KlassHandle JVMCIEnv::get_klass_by_name_impl(KlassHandle& accessing_klass,
-                                          constantPoolHandle& cpool,
+                                          const constantPoolHandle& cpool,
                                           Symbol* sym,
                                           bool require_local) {
   JVMCI_EXCEPTION_CONTEXT;
@@ -174,7 +174,7 @@
 
 // ------------------------------------------------------------------
 // Implementation of get_klass_by_index.
-KlassHandle JVMCIEnv::get_klass_by_index_impl(constantPoolHandle& cpool,
+KlassHandle JVMCIEnv::get_klass_by_index_impl(const constantPoolHandle& cpool,
                                         int index,
                                         bool& is_accessible,
                                         KlassHandle& accessor) {
@@ -215,7 +215,7 @@
 
 // ------------------------------------------------------------------
 // Get a klass from the constant pool.
-KlassHandle JVMCIEnv::get_klass_by_index(constantPoolHandle& cpool,
+KlassHandle JVMCIEnv::get_klass_by_index(const constantPoolHandle& cpool,
                                    int index,
                                    bool& is_accessible,
                                    KlassHandle& accessor) {
@@ -312,7 +312,7 @@
 
 
 // ------------------------------------------------------------------
-methodHandle JVMCIEnv::get_method_by_index_impl(constantPoolHandle& cpool,
+methodHandle JVMCIEnv::get_method_by_index_impl(const constantPoolHandle& cpool,
                                           int index, Bytecodes::Code bc,
                                           instanceKlassHandle& accessor) {
   if (bc == Bytecodes::_invokedynamic) {
@@ -383,9 +383,9 @@
   // For the case of <array>.clone(), the method holder can be an ArrayKlass*
   // instead of an InstanceKlass*.  For that case simply pretend that the
   // declared holder is Object.clone since that's where the call will bottom out.
-  if (method_holder->oop_is_instance()) {
+  if (method_holder->is_instance_klass()) {
     return instanceKlassHandle(method_holder());
-  } else if (method_holder->oop_is_array()) {
+  } else if (method_holder->is_array_klass()) {
     return instanceKlassHandle(SystemDictionary::Object_klass());
   } else {
     ShouldNotReachHere();
@@ -395,7 +395,7 @@
 
 
 // ------------------------------------------------------------------
-methodHandle JVMCIEnv::get_method_by_index(constantPoolHandle& cpool,
+methodHandle JVMCIEnv::get_method_by_index(const constantPoolHandle& cpool,
                                      int index, Bytecodes::Code bc,
                                      instanceKlassHandle& accessor) {
   ResourceMark rm;
--- a/hotspot/src/share/vm/jvmci/jvmciEnv.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciEnv.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -83,13 +83,13 @@
                              bool require_local);
 
   // Constant pool access.
-  static KlassHandle   get_klass_by_index(constantPoolHandle& cpool,
+  static KlassHandle   get_klass_by_index(const constantPoolHandle& cpool,
                                 int klass_index,
                                 bool& is_accessible,
                                 KlassHandle& loading_klass);
   static void   get_field_by_index(instanceKlassHandle& loading_klass, fieldDescriptor& fd,
                                 int field_index);
-  static methodHandle  get_method_by_index(constantPoolHandle& cpool,
+  static methodHandle  get_method_by_index(const constantPoolHandle& cpool,
                                  int method_index, Bytecodes::Code bc,
                                  instanceKlassHandle& loading_klass);
 
@@ -106,16 +106,16 @@
 
   // Implementation methods for loading and constant pool access.
   static KlassHandle get_klass_by_name_impl(KlassHandle& accessing_klass,
-                                  constantPoolHandle& cpool,
+                                  const constantPoolHandle& cpool,
                                   Symbol* klass_name,
                                   bool require_local);
-  static KlassHandle   get_klass_by_index_impl(constantPoolHandle& cpool,
+  static KlassHandle   get_klass_by_index_impl(const constantPoolHandle& cpool,
                                      int klass_index,
                                      bool& is_accessible,
                                      KlassHandle& loading_klass);
   static void   get_field_by_index_impl(instanceKlassHandle& loading_klass, fieldDescriptor& fd,
                                      int field_index);
-  static methodHandle  get_method_by_index_impl(constantPoolHandle& cpool,
+  static methodHandle  get_method_by_index_impl(const constantPoolHandle& cpool,
                                       int method_index, Bytecodes::Code bc,
                                       instanceKlassHandle& loading_klass);
 
--- a/hotspot/src/share/vm/jvmci/jvmciJavaClasses.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciJavaClasses.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -306,7 +306,7 @@
     }                                                                                                                                                          \
     static void compute_offsets();                                                                                                                             \
   public:                                                                                                                                                      \
-    static InstanceKlass* klass() { return SystemDictionary::name##_klass() == NULL ? NULL : InstanceKlass::cast(SystemDictionary::name##_klass()); }
+    static InstanceKlass* klass() { return SystemDictionary::name##_klass(); }
 
 #define END_CLASS };
 
--- a/hotspot/src/share/vm/jvmci/jvmciRuntime.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciRuntime.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -124,7 +124,7 @@
   //       (This may have to change if this code changes!)
   assert(array_klass->is_klass(), "not a class");
   oop obj;
-  if (array_klass->oop_is_typeArray()) {
+  if (array_klass->is_typeArray_klass()) {
     BasicType elt_type = TypeArrayKlass::cast(array_klass)->element_type();
     obj = oopFactory::new_typeArray(elt_type, length, CHECK);
   } else {
--- a/hotspot/src/share/vm/memory/allocation.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -66,7 +66,7 @@
 }
 
 void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
-  address res;
+  address res = NULL;
   switch (type) {
    case C_HEAP:
     res = (address)AllocateHeap(size, flags, CALLER_PC);
@@ -88,8 +88,8 @@
 
 void* ResourceObj::operator new(size_t size, const std::nothrow_t&  nothrow_constant,
     allocation_type type, MEMFLAGS flags) throw() {
-  //should only call this with std::nothrow, use other operator new() otherwise
-  address res;
+  // should only call this with std::nothrow, use other operator new() otherwise
+  address res = NULL;
   switch (type) {
    case C_HEAP:
     res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
--- a/hotspot/src/share/vm/memory/heapInspection.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/memory/heapInspection.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -286,7 +286,6 @@
   return true;
 }
 
-PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL
 void KlassInfoHisto::print_title(outputStream* st, bool csv_format,
                                  bool selected[], int width_table[],
                                  const char *name_table[]) {
@@ -298,11 +297,10 @@
     st->print(",ClassName");
   } else {
     st->print("Index Super");
-    for (int c=0; c<KlassSizeStats::_num_columns; c++) {
-PRAGMA_DIAG_PUSH
-PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
-      if (selected[c]) {st->print(str_fmt(width_table[c]), name_table[c]);}
-PRAGMA_DIAG_POP
+    for (int c = 0; c < KlassSizeStats::_num_columns; c++) {
+      if (selected[c]) {
+        st->print("%*s", width_table[c], name_table[c]);
+      }
     }
     st->print(" ClassName");
   }
@@ -321,7 +319,7 @@
 
   void do_cinfo(KlassInfoEntry* cie) {
     // ignore array classes
-    if (cie->klass()->oop_is_instance()) {
+    if (cie->klass()->is_instance_klass()) {
       _elements->append(cie);
     }
   }
@@ -348,8 +346,7 @@
 
   for(int i = 0; i < elements.length(); i++) {
     KlassInfoEntry* cie = elements.at(i);
-    const InstanceKlass* k = (InstanceKlass*)cie->klass();
-    Klass* super = ((InstanceKlass*)k)->java_super();
+    Klass* super = cie->klass()->super();
 
     // Set the index for the class.
     cie->set_index(i + 1);
@@ -544,8 +541,8 @@
       } else {
         int super_index = -1;
         // Print the stats for this class.
-        if (k->oop_is_instance()) {
-          Klass* super = ((InstanceKlass*)k)->java_super();
+        if (k->is_instance_klass()) {
+          Klass* super = k->super();
           if (super) {
             KlassInfoEntry* super_e = _cit->lookup(super);
             if (super_e) {
@@ -608,18 +605,12 @@
           case KlassSizeStats::_index_inst_size:
           case KlassSizeStats::_index_inst_count:
           case KlassSizeStats::_index_method_count:
-PRAGMA_DIAG_PUSH
-PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
-            st->print(str_fmt(width_table[c]), "-");
-PRAGMA_DIAG_POP
+            st->print("%*s", width_table[c], "-");
             break;
           default:
             {
               double perc = (double)(100) * (double)(colsum_table[c]) / (double)sz_sum._total_bytes;
-PRAGMA_DIAG_PUSH
-PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
-              st->print(perc_fmt(width_table[c]), perc);
-PRAGMA_DIAG_POP
+              st->print("%*.1f%%", width_table[c]-1, perc);
             }
           }
         }
--- a/hotspot/src/share/vm/memory/heapInspection.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/memory/heapInspection.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -313,32 +313,13 @@
     return HeapWordSize * x->size();
   }
 
-  // returns a format string to print a julong with the given width. E.g,
-  // printf(num_fmt(6), julong(10)) would print out the number 10 with 4
-  // leading spaces.
-PRAGMA_DIAG_PUSH
-PRAGMA_FORMAT_NONLITERAL_IGNORED
-
   static void print_julong(outputStream* st, int width, julong n) {
     int num_spaces = width - julong_width(n);
     if (num_spaces > 0) {
-      st->print(str_fmt(num_spaces), "");
+      st->print("%*s", num_spaces, "");
     }
     st->print(JULONG_FORMAT, n);
   }
-PRAGMA_DIAG_POP
-
-  static char* perc_fmt(int width) {
-    static char buf[32];
-    jio_snprintf(buf, sizeof(buf), "%%%d.1f%%%%", width-1);
-    return buf;
-  }
-
-  static char* str_fmt(int width) {
-    static char buf[32];
-    jio_snprintf(buf, sizeof(buf), "%%%ds", width);
-    return buf;
-  }
 
   static int julong_width(julong n) {
     if (n == 0) {
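
The heapInspection hunks above delete the str_fmt()/perc_fmt() helpers that built width-specific format strings at runtime (and needed -Wformat-nonliteral suppression), because printf-style formatting already accepts the field width as an argument when '*' is used. A short self-contained sketch of the replacement pattern with plain printf:

    #include <cstdio>

    int main() {
      const int width = 12;
      std::printf("%*s|\n", width, "Bytes");       // string right-aligned in a 12-char field
      std::printf("%*.1f%%|\n", width - 1, 42.5);  // percentage padded to the same column
    }
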
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -90,7 +90,7 @@
 static GrowableArray<Klass*>* _global_klass_objects;
 static void collect_classes(Klass* k) {
   _global_klass_objects->append_if_missing(k);
-  if (k->oop_is_instance()) {
+  if (k->is_instance_klass()) {
     // Add in the array classes too
     InstanceKlass* ik = InstanceKlass::cast(k);
     ik->array_klasses_do(collect_classes);
@@ -126,7 +126,7 @@
 static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
   for (int i = 0; i < _global_klass_objects->length(); i++) {
     Klass* k = _global_klass_objects->at(i);
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       InstanceKlass* ik = InstanceKlass::cast(k);
       for (int i = 0; i < ik->methods()->length(); i++) {
         Method* m = ik->methods()->at(i);
@@ -199,9 +199,9 @@
   int n = _global_klass_objects->length();
   for (int i = 0; i < n; i++) {
     Klass* obj = _global_klass_objects->at(i);
-    // Note oop_is_instance() is a virtual call.  After patching vtables
+    // Note is_instance_klass() is a virtual call in debug builds.  After patching vtables
     // all virtual calls on the dummy vtables will restore the original!
-    if (obj->oop_is_instance()) {
+    if (obj->is_instance_klass()) {
       InstanceKlass* ik = InstanceKlass::cast(obj);
       *(void**)ik = find_matching_vtbl_ptr(vtbl_list, new_vtable_start, ik);
       ConstantPool* cp = ik->constants();
@@ -482,12 +482,12 @@
     int num_type_array = 0, num_obj_array = 0, num_inst = 0;
     for (int i = 0; i < _global_klass_objects->length(); i++) {
       Klass* k = _global_klass_objects->at(i);
-      if (k->oop_is_instance()) {
+      if (k->is_instance_klass()) {
         num_inst ++;
-      } else if (k->oop_is_objArray()) {
+      } else if (k->is_objArray_klass()) {
         num_obj_array ++;
       } else {
-        assert(k->oop_is_typeArray(), "sanity");
+        assert(k->is_typeArray_klass(), "sanity");
         num_type_array ++;
       }
     }
@@ -679,8 +679,8 @@
 
 void MetaspaceShared::link_one_shared_class(Klass* obj, TRAPS) {
   Klass* k = obj;
-  if (k->oop_is_instance()) {
-    InstanceKlass* ik = (InstanceKlass*) k;
+  if (k->is_instance_klass()) {
+    InstanceKlass* ik = InstanceKlass::cast(k);
     // Link the class to cause the bytecodes to be rewritten and the
     // cpcache to be created. Class verification is done according
     // to -Xverify setting.
@@ -690,7 +690,7 @@
 }
 
 void MetaspaceShared::check_one_shared_class(Klass* k) {
-  if (k->oop_is_instance() && InstanceKlass::cast(k)->check_sharing_error_state()) {
+  if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
     _check_classes_made_progress = true;
   }
 }
--- a/hotspot/src/share/vm/memory/oopFactory.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/memory/oopFactory.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -81,10 +81,9 @@
 
 objArrayOop oopFactory::new_objArray(Klass* klass, int length, TRAPS) {
   assert(klass->is_klass(), "must be instance class");
-  if (klass->oop_is_array()) {
-    return ((ArrayKlass*)klass)->allocate_arrayArray(1, length, THREAD);
+  if (klass->is_array_klass()) {
+    return ArrayKlass::cast(klass)->allocate_arrayArray(1, length, THREAD);
   } else {
-    assert (klass->oop_is_instance(), "new object array with klass not an InstanceKlass");
-    return ((InstanceKlass*)klass)->allocate_objArray(1, length, THREAD);
+    return InstanceKlass::cast(klass)->allocate_objArray(1, length, THREAD);
   }
 }
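
The arrayKlass.hpp and oopFactory.cpp hunks above swap raw C-style downcasts for the cast() helpers, which assert the dynamic kind before narrowing. A toy version of that pattern, using a simple virtual predicate rather than HotSpot's Klass hierarchy:

    #include <cassert>
    #include <cstdio>

    struct Base {
      virtual ~Base() {}
      virtual bool is_derived() const { return false; }
    };

    struct Derived : Base {
      bool is_derived() const override { return true; }
      static Derived* cast(Base* b) {
        assert(b->is_derived() && "cast to Derived");  // checked in debug builds
        return static_cast<Derived*>(b);
      }
    };

    int main() {
      Derived d;
      Base* b = &d;
      Derived* p = Derived::cast(b);   // narrows only after the kind check
      std::printf("is_derived: %d\n", p->is_derived());
    }
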
--- a/hotspot/src/share/vm/memory/universe.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/memory/universe.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -409,7 +409,7 @@
     int i = 0;
     while (i < size) {
         // Allocate dummy in old generation
-      oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
+      oop dummy = SystemDictionary::Object_klass()->allocate_instance(CHECK);
       dummy_array->obj_at_put(i++, dummy);
     }
     {
@@ -484,8 +484,8 @@
     _mirrors[T_LONG]    = _long_mirror;
     _mirrors[T_SHORT]   = _short_mirror;
     _mirrors[T_VOID]    = _void_mirror;
-  //_mirrors[T_OBJECT]  = InstanceKlass::cast(_object_klass)->java_mirror();
-  //_mirrors[T_ARRAY]   = InstanceKlass::cast(_object_klass)->java_mirror();
+  //_mirrors[T_OBJECT]  = _object_klass->java_mirror();
+  //_mirrors[T_ARRAY]   = _object_klass->java_mirror();
 }
 
 void Universe::fixup_mirrors(TRAPS) {
@@ -545,9 +545,8 @@
   Klass* ko = k_h();
   klassVtable* vt = ko->vtable();
   if (vt) vt->initialize_vtable(false, CHECK);
-  if (ko->oop_is_instance()) {
-    InstanceKlass* ik = (InstanceKlass*)ko;
-    for (KlassHandle s_h(THREAD, ik->subklass());
+  if (ko->is_instance_klass()) {
+    for (KlassHandle s_h(THREAD, ko->subklass());
          s_h() != NULL;
          s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
       reinitialize_vtable_of(s_h, CHECK);
@@ -998,8 +997,8 @@
   // Setup static method for registering finalizers
   // The finalizer klass must be linked before looking up the method, in
   // case it needs to get rewritten.
-  InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
-  Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
+  SystemDictionary::Finalizer_klass()->link_class(CHECK_false);
+  Method* m = SystemDictionary::Finalizer_klass()->find_method(
                                   vmSymbols::register_method_name(),
                                   vmSymbols::register_method_signature());
   if (m == NULL || !m->is_static()) {
@@ -1009,8 +1008,8 @@
   Universe::_finalizer_register_cache->init(
     SystemDictionary::Finalizer_klass(), m);
 
-  InstanceKlass::cast(SystemDictionary::misc_Unsafe_klass())->link_class(CHECK_false);
-  m = InstanceKlass::cast(SystemDictionary::misc_Unsafe_klass())->find_method(
+  SystemDictionary::internal_Unsafe_klass()->link_class(CHECK_false);
+  m = SystemDictionary::internal_Unsafe_klass()->find_method(
                                   vmSymbols::throwIllegalAccessError_name(),
                                   vmSymbols::void_method_signature());
   if (m != NULL && !m->is_static()) {
@@ -1020,11 +1019,11 @@
     return false; // initialization failed (cannot throw exception yet)
   }
   Universe::_throw_illegal_access_error_cache->init(
-    SystemDictionary::misc_Unsafe_klass(), m);
+    SystemDictionary::internal_Unsafe_klass(), m);
 
   // Setup method for registering loaded classes in class loader vector
-  InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
-  m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
+  SystemDictionary::ClassLoader_klass()->link_class(CHECK_false);
+  m = SystemDictionary::ClassLoader_klass()->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
   if (m == NULL || m->is_static()) {
     tty->print_cr("Unable to link/verify ClassLoader.addClass method");
     return false; // initialization failed (cannot throw exception yet)
@@ -1033,8 +1032,8 @@
     SystemDictionary::ClassLoader_klass(), m);
 
   // Setup method for checking protection domain
-  InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
-  m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->
+  SystemDictionary::ProtectionDomain_klass()->link_class(CHECK_false);
+  m = SystemDictionary::ProtectionDomain_klass()->
             find_method(vmSymbols::impliesCreateAccessControlContext_name(),
                         vmSymbols::void_boolean_signature());
   // Allow NULL which should only happen with bootstrapping.
--- a/hotspot/src/share/vm/oops/arrayKlass.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/arrayKlass.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -50,7 +50,7 @@
 
  public:
   // Testing operation
-  bool oop_is_array_slow() const { return true; }
+  DEBUG_ONLY(bool is_array_klass_slow() const { return true; })
 
   // Instance variables
   int dimension() const                 { return _dimension;      }
@@ -86,8 +86,8 @@
 
   // Casting from Klass*
   static ArrayKlass* cast(Klass* k) {
-    assert(k->oop_is_array(), "cast to ArrayKlass");
-    return (ArrayKlass*) k;
+    assert(k->is_array_klass(), "cast to ArrayKlass");
+    return static_cast<ArrayKlass*>(k);
   }
 
   GrowableArray<Klass*>* compute_secondary_supers(int num_extra_slots);
--- a/hotspot/src/share/vm/oops/constantPool.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/constantPool.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -178,7 +178,7 @@
   return (i < 0) ? _no_index_sentinel : i;
 }
 
-void ConstantPool::trace_class_resolution(constantPoolHandle this_cp, KlassHandle k) {
+void ConstantPool::trace_class_resolution(const constantPoolHandle& this_cp, KlassHandle k) {
   ResourceMark rm;
   int line_number = -1;
   const char * source_file = NULL;
@@ -198,16 +198,16 @@
     if (source_file != NULL) {
       tty->print("RESOLVE %s %s %s:%d\n",
                  this_cp->pool_holder()->external_name(),
-                 InstanceKlass::cast(k())->external_name(), source_file, line_number);
+                 k->external_name(), source_file, line_number);
     } else {
       tty->print("RESOLVE %s %s\n",
                  this_cp->pool_holder()->external_name(),
-                 InstanceKlass::cast(k())->external_name());
+                 k->external_name());
     }
   }
 }
 
-Klass* ConstantPool::klass_at_impl(constantPoolHandle this_cp, int which,
+Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which,
                                    bool save_resolution_error, TRAPS) {
   assert(THREAD->is_Java_thread(), "must be a Java thread");
 
@@ -269,7 +269,7 @@
   ClassLoaderData* this_key = this_cp->pool_holder()->class_loader_data();
   this_key->record_dependency(k(), CHECK_NULL); // Can throw OOM
 
-  if (TraceClassResolution && !k->oop_is_array()) {
+  if (TraceClassResolution && !k->is_array_klass()) {
     // skip resolving the constant pool so that this code gets
     // called the next time some bytecodes refer to this class.
     trace_class_resolution(this_cp, k);
@@ -288,7 +288,7 @@
 // by compiler and exception handling.  Also used to avoid classloads for
 // instanceof operations. Returns NULL if the class has not been loaded or
 // if the verification of constant pool failed
-Klass* ConstantPool::klass_at_if_loaded(constantPoolHandle this_cp, int which) {
+Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int which) {
   CPSlot entry = this_cp->slot_at(which);
   if (entry.is_resolved()) {
     assert(entry.get_klass()->is_klass(), "must be");
@@ -321,12 +321,12 @@
 }
 
 
-Klass* ConstantPool::klass_ref_at_if_loaded(constantPoolHandle this_cp, int which) {
+Klass* ConstantPool::klass_ref_at_if_loaded(const constantPoolHandle& this_cp, int which) {
   return klass_at_if_loaded(this_cp, this_cp->klass_ref_index_at(which));
 }
 
 
-Method* ConstantPool::method_at_if_loaded(constantPoolHandle cpool,
+Method* ConstantPool::method_at_if_loaded(const constantPoolHandle& cpool,
                                                    int which) {
   if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
   int cache_index = decode_cpcache_index(which, true);
@@ -342,14 +342,14 @@
 }
 
 
-bool ConstantPool::has_appendix_at_if_loaded(constantPoolHandle cpool, int which) {
+bool ConstantPool::has_appendix_at_if_loaded(const constantPoolHandle& cpool, int which) {
   if (cpool->cache() == NULL)  return false;  // nothing to load yet
   int cache_index = decode_cpcache_index(which, true);
   ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
   return e->has_appendix();
 }
 
-oop ConstantPool::appendix_at_if_loaded(constantPoolHandle cpool, int which) {
+oop ConstantPool::appendix_at_if_loaded(const constantPoolHandle& cpool, int which) {
   if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
   int cache_index = decode_cpcache_index(which, true);
   ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
@@ -357,14 +357,14 @@
 }
 
 
-bool ConstantPool::has_method_type_at_if_loaded(constantPoolHandle cpool, int which) {
+bool ConstantPool::has_method_type_at_if_loaded(const constantPoolHandle& cpool, int which) {
   if (cpool->cache() == NULL)  return false;  // nothing to load yet
   int cache_index = decode_cpcache_index(which, true);
   ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
   return e->has_method_type();
 }
 
-oop ConstantPool::method_type_at_if_loaded(constantPoolHandle cpool, int which) {
+oop ConstantPool::method_type_at_if_loaded(const constantPoolHandle& cpool, int which) {
   if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
   int cache_index = decode_cpcache_index(which, true);
   ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
@@ -434,15 +434,15 @@
 }
 
 
-void ConstantPool::verify_constant_pool_resolve(constantPoolHandle this_cp, KlassHandle k, TRAPS) {
- if (k->oop_is_instance() || k->oop_is_objArray()) {
+void ConstantPool::verify_constant_pool_resolve(const constantPoolHandle& this_cp, KlassHandle k, TRAPS) {
+ if (k->is_instance_klass() || k->is_objArray_klass()) {
     instanceKlassHandle holder (THREAD, this_cp->pool_holder());
-    Klass* elem = k->oop_is_instance() ? k() : ObjArrayKlass::cast(k())->bottom_klass();
+    Klass* elem = k->is_instance_klass() ? k() : ObjArrayKlass::cast(k())->bottom_klass();
     KlassHandle element (THREAD, elem);
 
     // The element type could be a typeArray - we only need the access check if it is
     // a reference to another class
-    if (element->oop_is_instance()) {
+    if (element->is_instance_klass()) {
       LinkResolver::check_klass_accessability(holder, element, CHECK);
     }
   }
@@ -502,7 +502,7 @@
 }
 
 
-void ConstantPool::resolve_string_constants_impl(constantPoolHandle this_cp, TRAPS) {
+void ConstantPool::resolve_string_constants_impl(const constantPoolHandle& this_cp, TRAPS) {
   for (int index = 1; index < this_cp->length(); index++) { // Index 0 is unused
     if (this_cp->tag_at(index).is_string()) {
       this_cp->string_at(index, CHECK);
@@ -526,7 +526,7 @@
   return true;
 }
 
-Symbol* ConstantPool::exception_message(constantPoolHandle this_cp, int which, constantTag tag, oop pending_exception) {
+Symbol* ConstantPool::exception_message(const constantPoolHandle& this_cp, int which, constantTag tag, oop pending_exception) {
   // Dig out the detailed message to reuse if possible
   Symbol* message = java_lang_Throwable::detail_message(pending_exception);
   if (message != NULL) {
@@ -554,7 +554,7 @@
   return message;
 }
 
-void ConstantPool::throw_resolution_error(constantPoolHandle this_cp, int which, TRAPS) {
+void ConstantPool::throw_resolution_error(const constantPoolHandle& this_cp, int which, TRAPS) {
   Symbol* message = NULL;
   Symbol* error = SystemDictionary::find_resolution_error(this_cp, which, &message);
   assert(error != NULL && message != NULL, "checking");
@@ -565,7 +565,7 @@
 
 // If resolution for Class, MethodHandle or MethodType fails, save the exception
 // in the resolution error table, so that the same exception is thrown again.
-void ConstantPool::save_and_throw_exception(constantPoolHandle this_cp, int which,
+void ConstantPool::save_and_throw_exception(const constantPoolHandle& this_cp, int which,
                                             constantTag tag, TRAPS) {
   Symbol* error = PENDING_EXCEPTION->klass()->name();
 
@@ -603,7 +603,7 @@
 // Called to resolve constants in the constant pool and return an oop.
 // Some constant pool entries cache their resolved oop. This is also
 // called to create oops from constants to use in arguments for invokedynamic
-oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_cp, int index, int cache_index, TRAPS) {
+oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, int index, int cache_index, TRAPS) {
   oop result_oop = NULL;
   Handle throw_exception;
 
@@ -756,7 +756,7 @@
 }
 
 
-oop ConstantPool::resolve_bootstrap_specifier_at_impl(constantPoolHandle this_cp, int index, TRAPS) {
+oop ConstantPool::resolve_bootstrap_specifier_at_impl(const constantPoolHandle& this_cp, int index, TRAPS) {
   assert(this_cp->tag_at(index).is_invoke_dynamic(), "Corrupted constant pool");
 
   Handle bsm;
@@ -794,7 +794,7 @@
   return info();
 }
 
-oop ConstantPool::string_at_impl(constantPoolHandle this_cp, int which, int obj_index, TRAPS) {
+oop ConstantPool::string_at_impl(const constantPoolHandle& this_cp, int which, int obj_index, TRAPS) {
   // If the string has already been interned, this entry will be non-null
   oop str = this_cp->resolved_references()->obj_at(obj_index);
   if (str != NULL) return str;
@@ -830,7 +830,7 @@
 
 // Compare this constant pool's entry at index1 to the constant pool
 // cp2's entry at index2.
-bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2,
+bool ConstantPool::compare_entry_to(int index1, const constantPoolHandle& cp2,
        int index2, TRAPS) {
 
   // The error tags are equivalent to non-error tags when comparing
@@ -1056,7 +1056,7 @@
 
 // Extend the operands array with the length and size of the ext_cp operands.
 // Used in RedefineClasses for CP merge.
-void ConstantPool::extend_operands(constantPoolHandle ext_cp, TRAPS) {
+void ConstantPool::extend_operands(const constantPoolHandle& ext_cp, TRAPS) {
   int delta_len = operand_array_length(ext_cp->operands());
   if (delta_len == 0) {
     return; // nothing to do
@@ -1096,8 +1096,8 @@
 } // end shrink_operands()
 
 
-void ConstantPool::copy_operands(constantPoolHandle from_cp,
-                                 constantPoolHandle to_cp,
+void ConstantPool::copy_operands(const constantPoolHandle& from_cp,
+                                 const constantPoolHandle& to_cp,
                                  TRAPS) {
 
   int from_oplen = operand_array_length(from_cp->operands());
@@ -1160,8 +1160,8 @@
 // Copy this constant pool's entries at start_i to end_i (inclusive)
 // to the constant pool to_cp's entries starting at to_i. A total of
 // (end_i - start_i) + 1 entries are copied.
-void ConstantPool::copy_cp_to_impl(constantPoolHandle from_cp, int start_i, int end_i,
-       constantPoolHandle to_cp, int to_i, TRAPS) {
+void ConstantPool::copy_cp_to_impl(const constantPoolHandle& from_cp, int start_i, int end_i,
+       const constantPoolHandle& to_cp, int to_i, TRAPS) {
 
 
   int dest_i = to_i;  // leave original alone for debug purposes
@@ -1191,8 +1191,8 @@
 
 // Copy this constant pool's entry at from_i to the constant pool
 // to_cp's entry at to_i.
-void ConstantPool::copy_entry_to(constantPoolHandle from_cp, int from_i,
-                                        constantPoolHandle to_cp, int to_i,
+void ConstantPool::copy_entry_to(const constantPoolHandle& from_cp, int from_i,
+                                        const constantPoolHandle& to_cp, int to_i,
                                         TRAPS) {
 
   int tag = from_cp->tag_at(from_i).value();
@@ -1339,7 +1339,7 @@
 // constant pool's entry at pattern_i. Returns the index of a
 // matching entry or zero (0) if there is no matching entry.
 int ConstantPool::find_matching_entry(int pattern_i,
-      constantPoolHandle search_cp, TRAPS) {
+      const constantPoolHandle& search_cp, TRAPS) {
 
   // index zero (0) is not used
   for (int i = 1; i < search_cp->length(); i++) {
@@ -1355,7 +1355,7 @@
 
 // Compare this constant pool's bootstrap specifier at idx1 to the constant pool
 // cp2's bootstrap specifier at idx2.
-bool ConstantPool::compare_operand_to(int idx1, constantPoolHandle cp2, int idx2, TRAPS) {
+bool ConstantPool::compare_operand_to(int idx1, const constantPoolHandle& cp2, int idx2, TRAPS) {
   int k1 = operand_bootstrap_method_ref_index_at(idx1);
   int k2 = cp2->operand_bootstrap_method_ref_index_at(idx2);
   bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
@@ -1382,7 +1382,7 @@
 // this constant pool's bootstrap specifier at pattern_i index.
 // Return the index of a matching bootstrap specifier or (-1) if there is no match.
 int ConstantPool::find_matching_operand(int pattern_i,
-                    constantPoolHandle search_cp, int search_len, TRAPS) {
+                    const constantPoolHandle& search_cp, int search_len, TRAPS) {
   for (int i = 0; i < search_len; i++) {
     bool found = compare_operand_to(pattern_i, search_cp, i, CHECK_(-1));
     if (found) {
@@ -1843,7 +1843,7 @@
     if (cp->tag_at(i).is_unresolved_klass()) {
       // This will force loading of the class
       Klass* klass = cp->klass_at(i, CHECK);
-      if (klass->oop_is_instance()) {
+      if (klass->is_instance_klass()) {
         // Force initialization of class
         InstanceKlass::cast(klass)->initialize(CHECK);
       }
--- a/hotspot/src/share/vm/oops/constantPool.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/constantPool.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -604,15 +604,15 @@
     return offset;
   }
   // Compare a bootstrap specifier in the operands arrays
-  bool compare_operand_to(int bootstrap_specifier_index1, constantPoolHandle cp2,
+  bool compare_operand_to(int bootstrap_specifier_index1, const constantPoolHandle& cp2,
                           int bootstrap_specifier_index2, TRAPS);
   // Find a bootstrap specifier in the operands array
-  int find_matching_operand(int bootstrap_specifier_index, constantPoolHandle search_cp,
+  int find_matching_operand(int bootstrap_specifier_index, const constantPoolHandle& search_cp,
                             int operands_cur_len, TRAPS);
   // Resize the operands array with delta_len and delta_size
   void resize_operands(int delta_len, int delta_size, TRAPS);
   // Extend the operands array with the length and size of the ext_cp operands
-  void extend_operands(constantPoolHandle ext_cp, TRAPS);
+  void extend_operands(const constantPoolHandle& ext_cp, TRAPS);
   // Shrink the operands array to a smaller array with new_len length
   void shrink_operands(int new_len, TRAPS);
 
@@ -735,13 +735,13 @@
   friend class SystemDictionary;
 
   // Used by compiler to prevent classloading.
-  static Method*          method_at_if_loaded      (constantPoolHandle this_cp, int which);
-  static bool       has_appendix_at_if_loaded      (constantPoolHandle this_cp, int which);
-  static oop            appendix_at_if_loaded      (constantPoolHandle this_cp, int which);
-  static bool    has_method_type_at_if_loaded      (constantPoolHandle this_cp, int which);
-  static oop         method_type_at_if_loaded      (constantPoolHandle this_cp, int which);
-  static Klass*            klass_at_if_loaded      (constantPoolHandle this_cp, int which);
-  static Klass*        klass_ref_at_if_loaded      (constantPoolHandle this_cp, int which);
+  static Method*          method_at_if_loaded      (const constantPoolHandle& this_cp, int which);
+  static bool       has_appendix_at_if_loaded      (const constantPoolHandle& this_cp, int which);
+  static oop            appendix_at_if_loaded      (const constantPoolHandle& this_cp, int which);
+  static bool    has_method_type_at_if_loaded      (const constantPoolHandle& this_cp, int which);
+  static oop         method_type_at_if_loaded      (const constantPoolHandle& this_cp, int which);
+  static Klass*            klass_at_if_loaded      (const constantPoolHandle& this_cp, int which);
+  static Klass*        klass_ref_at_if_loaded      (const constantPoolHandle& this_cp, int which);
 
   // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
   // future by other Java code. These take constant pool indices rather than
@@ -797,38 +797,38 @@
   }
 
   // Performs the LinkResolver checks
-  static void verify_constant_pool_resolve(constantPoolHandle this_cp, KlassHandle klass, TRAPS);
+  static void verify_constant_pool_resolve(const constantPoolHandle& this_cp, KlassHandle klass, TRAPS);
 
   // Implementation of methods that need an exposed 'this' pointer, in order to
   // handle GC while executing the method
-  static Klass* klass_at_impl(constantPoolHandle this_cp, int which,
+  static Klass* klass_at_impl(const constantPoolHandle& this_cp, int which,
                               bool save_resolution_error, TRAPS);
-  static oop string_at_impl(constantPoolHandle this_cp, int which, int obj_index, TRAPS);
+  static oop string_at_impl(const constantPoolHandle& this_cp, int which, int obj_index, TRAPS);
 
-  static void trace_class_resolution(constantPoolHandle this_cp, KlassHandle k);
+  static void trace_class_resolution(const constantPoolHandle& this_cp, KlassHandle k);
 
   // Resolve string constants (to prevent allocation during compilation)
-  static void resolve_string_constants_impl(constantPoolHandle this_cp, TRAPS);
+  static void resolve_string_constants_impl(const constantPoolHandle& this_cp, TRAPS);
 
-  static oop resolve_constant_at_impl(constantPoolHandle this_cp, int index, int cache_index, TRAPS);
-  static oop resolve_bootstrap_specifier_at_impl(constantPoolHandle this_cp, int index, TRAPS);
+  static oop resolve_constant_at_impl(const constantPoolHandle& this_cp, int index, int cache_index, TRAPS);
+  static oop resolve_bootstrap_specifier_at_impl(const constantPoolHandle& this_cp, int index, TRAPS);
 
   // Exception handling
-  static void throw_resolution_error(constantPoolHandle this_cp, int which, TRAPS);
-  static Symbol* exception_message(constantPoolHandle this_cp, int which, constantTag tag, oop pending_exception);
-  static void save_and_throw_exception(constantPoolHandle this_cp, int which, constantTag tag, TRAPS);
+  static void throw_resolution_error(const constantPoolHandle& this_cp, int which, TRAPS);
+  static Symbol* exception_message(const constantPoolHandle& this_cp, int which, constantTag tag, oop pending_exception);
+  static void save_and_throw_exception(const constantPoolHandle& this_cp, int which, constantTag tag, TRAPS);
 
  public:
   // Merging ConstantPool* support:
-  bool compare_entry_to(int index1, constantPoolHandle cp2, int index2, TRAPS);
-  void copy_cp_to(int start_i, int end_i, constantPoolHandle to_cp, int to_i, TRAPS) {
+  bool compare_entry_to(int index1, const constantPoolHandle& cp2, int index2, TRAPS);
+  void copy_cp_to(int start_i, int end_i, const constantPoolHandle& to_cp, int to_i, TRAPS) {
     constantPoolHandle h_this(THREAD, this);
     copy_cp_to_impl(h_this, start_i, end_i, to_cp, to_i, THREAD);
   }
-  static void copy_cp_to_impl(constantPoolHandle from_cp, int start_i, int end_i, constantPoolHandle to_cp, int to_i, TRAPS);
-  static void copy_entry_to(constantPoolHandle from_cp, int from_i, constantPoolHandle to_cp, int to_i, TRAPS);
-  static void copy_operands(constantPoolHandle from_cp, constantPoolHandle to_cp, TRAPS);
-  int  find_matching_entry(int pattern_i, constantPoolHandle search_cp, TRAPS);
+  static void copy_cp_to_impl(const constantPoolHandle& from_cp, int start_i, int end_i, const constantPoolHandle& to_cp, int to_i, TRAPS);
+  static void copy_entry_to(const constantPoolHandle& from_cp, int from_i, const constantPoolHandle& to_cp, int to_i, TRAPS);
+  static void copy_operands(const constantPoolHandle& from_cp, const constantPoolHandle& to_cp, TRAPS);
+  int  find_matching_entry(int pattern_i, const constantPoolHandle& search_cp, TRAPS);
   int  version() const                    { return _saved._version; }
   void set_version(int version)           { _saved._version = version; }
   void increment_and_save_version(int version) {
--- a/hotspot/src/share/vm/oops/cpCache.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/cpCache.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -246,7 +246,7 @@
   set_direct_or_vtable_call(invoke_code, method, index);
 }
 
-void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
+void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code, const methodHandle& method, int index) {
   assert(method->method_holder()->verify_itable_index(index), "");
   assert(invoke_code == Bytecodes::_invokeinterface, "");
   InstanceKlass* interf = method->method_holder();
@@ -261,15 +261,15 @@
 }
 
 
-void ConstantPoolCacheEntry::set_method_handle(constantPoolHandle cpool, const CallInfo &call_info) {
+void ConstantPoolCacheEntry::set_method_handle(const constantPoolHandle& cpool, const CallInfo &call_info) {
   set_method_handle_common(cpool, Bytecodes::_invokehandle, call_info);
 }
 
-void ConstantPoolCacheEntry::set_dynamic_call(constantPoolHandle cpool, const CallInfo &call_info) {
+void ConstantPoolCacheEntry::set_dynamic_call(const constantPoolHandle& cpool, const CallInfo &call_info) {
   set_method_handle_common(cpool, Bytecodes::_invokedynamic, call_info);
 }
 
-void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
+void ConstantPoolCacheEntry::set_method_handle_common(const constantPoolHandle& cpool,
                                                       Bytecodes::Code invoke_code,
                                                       const CallInfo &call_info) {
   // NOTE: This CPCE can be the subject of data races.
@@ -361,7 +361,7 @@
   }
 }
 
-Method* ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool) {
+Method* ConstantPoolCacheEntry::method_if_resolved(const constantPoolHandle& cpool) {
   // Decode the action of set_method and set_interface_call
   Bytecodes::Code invoke_code = bytecode_1();
   if (invoke_code != (Bytecodes::Code)0) {
@@ -394,7 +394,7 @@
         int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
         if (cpool->tag_at(holder_index).is_klass()) {
           Klass* klass = cpool->resolved_klass_at(holder_index);
-          if (!klass->oop_is_instance())
+          if (!klass->is_instance_klass())
             klass = SystemDictionary::Object_klass();
           return InstanceKlass::cast(klass)->method_at_vtable(f2_as_index());
         }
@@ -406,7 +406,7 @@
 }
 
 
-oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) {
+oop ConstantPoolCacheEntry::appendix_if_resolved(const constantPoolHandle& cpool) {
   if (!has_appendix())
     return NULL;
   const int ref_index = f2_as_index() + _indy_resolved_references_appendix_offset;
@@ -415,7 +415,7 @@
 }
 
 
-oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) {
+oop ConstantPoolCacheEntry::method_type_if_resolved(const constantPoolHandle& cpool) {
   if (!has_method_type())
     return NULL;
   const int ref_index = f2_as_index() + _indy_resolved_references_method_type_offset;
--- a/hotspot/src/share/vm/oops/cpCache.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/cpCache.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -246,17 +246,17 @@
 
   void set_itable_call(
     Bytecodes::Code invoke_code,                 // the bytecode used; must be invokeinterface
-    methodHandle method,                         // the resolved interface method
+    const methodHandle& method,                  // the resolved interface method
     int itable_index                             // index into itable for the method
   );
 
   void set_method_handle(
-    constantPoolHandle cpool,                    // holding constant pool (required for locking)
+    const constantPoolHandle& cpool,             // holding constant pool (required for locking)
     const CallInfo &call_info                    // Call link information
   );
 
   void set_dynamic_call(
-    constantPoolHandle cpool,                    // holding constant pool (required for locking)
+    const constantPoolHandle& cpool,             // holding constant pool (required for locking)
     const CallInfo &call_info                    // Call link information
   );
 
@@ -276,7 +276,7 @@
   // resolution logic needs to make slightly different assessments about the
   // number and types of arguments.
   void set_method_handle_common(
-    constantPoolHandle cpool,                    // holding constant pool (required for locking)
+    const constantPoolHandle& cpool,             // holding constant pool (required for locking)
     Bytecodes::Code invoke_code,                 // _invokehandle or _invokedynamic
     const CallInfo &call_info                    // Call link information
   );
@@ -291,9 +291,9 @@
     _indy_resolved_references_entries
   };
 
-  Method*      method_if_resolved(constantPoolHandle cpool);
-  oop        appendix_if_resolved(constantPoolHandle cpool);
-  oop     method_type_if_resolved(constantPoolHandle cpool);
+  Method*      method_if_resolved(const constantPoolHandle& cpool);
+  oop        appendix_if_resolved(const constantPoolHandle& cpool);
+  oop     method_type_if_resolved(const constantPoolHandle& cpool);
 
   void set_parameter_size(int value);
 
--- a/hotspot/src/share/vm/oops/fieldInfo.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/fieldInfo.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -176,7 +176,7 @@
     return (_shorts[low_packed_offset] & FIELDINFO_TAG_MASK) == FIELDINFO_TAG_OFFSET;
   }
 
-  Symbol* name(constantPoolHandle cp) const {
+  Symbol* name(const constantPoolHandle& cp) const {
     int index = name_index();
     if (is_internal()) {
       return lookup_symbol(index);
@@ -184,7 +184,7 @@
     return cp->symbol_at(index);
   }
 
-  Symbol* signature(constantPoolHandle cp) const {
+  Symbol* signature(const constantPoolHandle& cp) const {
     int index = signature_index();
     if (is_internal()) {
       return lookup_symbol(index);
--- a/hotspot/src/share/vm/oops/fieldStreams.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/fieldStreams.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@
     return num_fields;
   }
 
-  FieldStreamBase(Array<u2>* fields, constantPoolHandle constants, int start, int limit) {
+  FieldStreamBase(Array<u2>* fields, const constantPoolHandle& constants, int start, int limit) {
     _fields = fields;
     _constants = constants;
     _index = start;
@@ -91,7 +91,7 @@
     }
   }
 
-  FieldStreamBase(Array<u2>* fields, constantPoolHandle constants) {
+  FieldStreamBase(Array<u2>* fields, const constantPoolHandle& constants) {
     _fields = fields;
     _constants = constants;
     _index = 0;
@@ -251,7 +251,7 @@
 
 class AllFieldStream : public FieldStreamBase {
  public:
-  AllFieldStream(Array<u2>* fields, constantPoolHandle constants): FieldStreamBase(fields, constants) {}
+  AllFieldStream(Array<u2>* fields, const constantPoolHandle& constants): FieldStreamBase(fields, constants) {}
   AllFieldStream(InstanceKlass* k):      FieldStreamBase(k->fields(), k->constants()) {}
   AllFieldStream(instanceKlassHandle k): FieldStreamBase(k->fields(), k->constants()) {}
 };
--- a/hotspot/src/share/vm/oops/generateOopMap.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/generateOopMap.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,7 +222,7 @@
   }
 }
 
-void RetTable::compute_ret_table(methodHandle method) {
+void RetTable::compute_ret_table(const methodHandle& method) {
   BytecodeStream i(method);
   Bytecodes::Code bytecode;
 
@@ -2039,7 +2039,7 @@
 //
 //  ============ Main Entry Point ===========
 //
-GenerateOopMap::GenerateOopMap(methodHandle method) {
+GenerateOopMap::GenerateOopMap(const methodHandle& method) {
   // We have to initialize all variables here, that can be queried directly
   _method = method;
   _max_locals=0;
--- a/hotspot/src/share/vm/oops/generateOopMap.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/generateOopMap.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@
   void add_jsr(int return_bci, int target_bci);   // Adds entry to list
  public:
   RetTable()                                                  { _first = NULL; }
-  void compute_ret_table(methodHandle method);
+  void compute_ret_table(const methodHandle& method);
   void update_ret_table(int bci, int delta);
   RetTableEntry* find_jsrs_for_target(int targBci);
 };
@@ -462,7 +462,7 @@
 
   friend class RelocCallback;
  public:
-  GenerateOopMap(methodHandle method);
+  GenerateOopMap(const methodHandle& method);
 
   // Compute the map.
   void compute_map(TRAPS);
@@ -537,7 +537,7 @@
 #endif
 
  public:
-  ResolveOopMapConflicts(methodHandle method) : GenerateOopMap(method) { _must_clear_locals = false; };
+  ResolveOopMapConflicts(const methodHandle& method) : GenerateOopMap(method) { _must_clear_locals = false; };
 
   methodHandle do_potential_rewrite(TRAPS);
   bool must_clear_locals() const { return _must_clear_locals; }
@@ -562,7 +562,7 @@
                                            int stack_top)                 {}
   virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars) {}
  public:
-  GeneratePairingInfo(methodHandle method) : GenerateOopMap(method)       {};
+  GeneratePairingInfo(const methodHandle& method) : GenerateOopMap(method)       {};
 
   // Call compute_map(CHECK) to generate info.
 };
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -742,7 +742,7 @@
   // A class could already be verified, since it has been reflected upon.
   this_k->link_class(CHECK);
 
-  DTRACE_CLASSINIT_PROBE(required, InstanceKlass::cast(this_k()), -1);
+  DTRACE_CLASSINIT_PROBE(required, this_k(), -1);
 
   bool wait = false;
 
@@ -765,19 +765,19 @@
 
     // Step 3
     if (this_k->is_being_initialized() && this_k->is_reentrant_initialization(self)) {
-      DTRACE_CLASSINIT_PROBE_WAIT(recursive, InstanceKlass::cast(this_k()), -1,wait);
+      DTRACE_CLASSINIT_PROBE_WAIT(recursive, this_k(), -1,wait);
       return;
     }
 
     // Step 4
     if (this_k->is_initialized()) {
-      DTRACE_CLASSINIT_PROBE_WAIT(concurrent, InstanceKlass::cast(this_k()), -1,wait);
+      DTRACE_CLASSINIT_PROBE_WAIT(concurrent, this_k(), -1,wait);
       return;
     }
 
     // Step 5
     if (this_k->is_in_error_state()) {
-      DTRACE_CLASSINIT_PROBE_WAIT(erroneous, InstanceKlass::cast(this_k()), -1,wait);
+      DTRACE_CLASSINIT_PROBE_WAIT(erroneous, this_k(), -1,wait);
       ResourceMark rm(THREAD);
       const char* desc = "Could not initialize class ";
       const char* className = this_k->external_name();
@@ -810,7 +810,7 @@
         this_k->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
         CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, superclass initialization error is thrown below
       }
-      DTRACE_CLASSINIT_PROBE_WAIT(super__failed, InstanceKlass::cast(this_k()), -1,wait);
+      DTRACE_CLASSINIT_PROBE_WAIT(super__failed, this_k(), -1,wait);
       THROW_OOP(e());
     }
   }
@@ -826,7 +826,7 @@
   {
     assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
     JavaThread* jt = (JavaThread*)THREAD;
-    DTRACE_CLASSINIT_PROBE_WAIT(clinit, InstanceKlass::cast(this_k()), -1,wait);
+    DTRACE_CLASSINIT_PROBE_WAIT(clinit, this_k(), -1,wait);
     // Timer includes any side effects of class initialization (resolution,
     // etc), but not recursive entry into call_class_initializer().
     PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
@@ -860,7 +860,7 @@
       // JVMTI internal flag reset is needed in order to report ExceptionInInitializerError
       JvmtiExport::clear_detected_exception((JavaThread*)THREAD);
     }
-    DTRACE_CLASSINIT_PROBE_WAIT(error, InstanceKlass::cast(this_k()), -1,wait);
+    DTRACE_CLASSINIT_PROBE_WAIT(error, this_k(), -1,wait);
     if (e->is_a(SystemDictionary::Error_klass())) {
       THROW_OOP(e());
     } else {
@@ -870,7 +870,7 @@
                 &args);
     }
   }
-  DTRACE_CLASSINIT_PROBE_WAIT(end, InstanceKlass::cast(this_k()), -1,wait);
+  DTRACE_CLASSINIT_PROBE_WAIT(end, this_k(), -1,wait);
 }
 
 
@@ -907,7 +907,7 @@
   // Filter out subclasses whose supers already implement me.
   // (Note: CHA must walk subclasses of direct implementors
   // in order to locate indirect implementors.)
-  Klass* sk = InstanceKlass::cast(k)->super();
+  Klass* sk = k->super();
   if (sk != NULL && InstanceKlass::cast(sk)->implements_interface(this))
     // We only need to check one immediate superclass, since the
     // implements_interface query looks at transitive_interfaces.
@@ -955,8 +955,7 @@
 
 GrowableArray<Klass*>* InstanceKlass::compute_secondary_supers(int num_extra_slots) {
   // The secondaries are the implemented interfaces.
-  InstanceKlass* ik = InstanceKlass::cast(this);
-  Array<Klass*>* interfaces = ik->transitive_interfaces();
+  Array<Klass*>* interfaces = transitive_interfaces();
   int num_secondaries = num_extra_slots + interfaces->length();
   if (num_secondaries == 0) {
     // Must share this for correct bootstrapping!
@@ -1141,7 +1140,7 @@
 }
 
 
-void InstanceKlass::mask_for(methodHandle method, int bci,
+void InstanceKlass::mask_for(const methodHandle& method, int bci,
   InterpreterOopMap* entry_for) {
   // Dirty read, then double-check under a lock.
   if (_oop_map_cache == NULL) {
@@ -1532,7 +1531,7 @@
     if (method != NULL) {
       return method;
     }
-    klass = InstanceKlass::cast(klass)->super();
+    klass = klass->super();
     overpass_local_mode = skip_overpass;   // Always ignore overpass methods in superclasses
   }
   return NULL;
@@ -1541,13 +1540,13 @@
 #ifdef ASSERT
 // search through class hierarchy and return true if this class or
 // one of the superclasses was redefined
-bool InstanceKlass::has_redefined_this_or_super() const {
-  const InstanceKlass* klass = this;
+bool InstanceKlass::has_redefined_this_or_super() {
+  Klass* klass = this;
   while (klass != NULL) {
-    if (klass->has_been_redefined()) {
+    if (InstanceKlass::cast(klass)->has_been_redefined()) {
       return true;
     }
-    klass = InstanceKlass::cast(klass->super());
+    klass = klass->super();
   }
   return false;
 }
@@ -1645,7 +1644,7 @@
 // locking has to be done very carefully to avoid deadlocks
 // and/or other cache consistency problems.
 //
-jmethodID InstanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
+jmethodID InstanceKlass::get_jmethod_id(instanceKlassHandle ik_h, const methodHandle& method_h) {
   size_t idnum = (size_t)method_h->method_idnum();
   jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
   size_t length = 0;
@@ -1907,18 +1906,33 @@
 // Decrement count of the nmethod in the dependency list and remove
 // the bucket completely when the count goes to 0.  This method must
 // find a corresponding bucket otherwise there's a bug in the
-// recording of dependencies. Returns true if the bucket is ready for reclamation.
-//
-bool nmethodBucket::remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
+// recording of dependencies. Returns true if the bucket was deleted,
+// or marked ready for reclamation.
+bool nmethodBucket::remove_dependent_nmethod(nmethodBucket** deps, nmethod* nm, bool delete_immediately) {
   assert_locked_or_safepoint(CodeCache_lock);
 
-  for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
+  nmethodBucket* first = *deps;
+  nmethodBucket* last = NULL;
+
+  for (nmethodBucket* b = first; b != NULL; b = b->next()) {
     if (nm == b->get_nmethod()) {
       int val = b->decrement();
       guarantee(val >= 0, "Underflow: %d", val);
-      return (val == 0);
+      if (val == 0) {
+        if (delete_immediately) {
+          if (last == NULL) {
+            *deps = b->next();
+          } else {
+            last->set_next(b->next());
+          }
+          delete b;
+        }
+      }
+      return true;
     }
+    last = b;
   }
+
 #ifdef ASSERT
   tty->print_raw_cr("### can't find dependent nmethod");
   nm->print();
@@ -1927,6 +1941,12 @@
   return false;
 }
 
+// Convenience overload, for callers that don't want to delete the nmethodBucket entry.
+bool nmethodBucket::remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
+  nmethodBucket** deps_addr = &deps;
+  return remove_dependent_nmethod(deps_addr, nm, false /* Don't delete */);
+}
+
 //
 // Reclaim all unused buckets. Returns new head of the list.
 //
@@ -2013,10 +2033,10 @@
   _dependencies = nmethodBucket::add_dependent_nmethod(_dependencies, nm);
 }
 
-void InstanceKlass::remove_dependent_nmethod(nmethod* nm) {
+void InstanceKlass::remove_dependent_nmethod(nmethod* nm, bool delete_immediately) {
   assert_locked_or_safepoint(CodeCache_lock);
 
-  if (nmethodBucket::remove_dependent_nmethod(_dependencies, nm)) {
+  if (nmethodBucket::remove_dependent_nmethod(&_dependencies, nm, delete_immediately)) {
     set_has_unloaded_dependent(true);
   }
 }
@@ -2031,6 +2051,13 @@
 }
 #endif //PRODUCT
 
+void InstanceKlass::clean_weak_instanceklass_links(BoolObjectClosure* is_alive) {
+  clean_implementors_list(is_alive);
+  clean_method_data(is_alive);
+
+  clean_dependent_nmethods();
+}
+
 void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
   assert(class_loader_data()->is_alive(is_alive), "this klass should be live");
   if (is_interface()) {
@@ -2297,32 +2324,18 @@
 
 // different versions of is_same_class_package
 bool InstanceKlass::is_same_class_package(Klass* class2) {
-  Klass* class1 = this;
-  oop classloader1 = InstanceKlass::cast(class1)->class_loader();
-  Symbol* classname1 = class1->name();
-
-  if (class2->oop_is_objArray()) {
+  if (class2->is_objArray_klass()) {
     class2 = ObjArrayKlass::cast(class2)->bottom_klass();
   }
-  oop classloader2;
-  if (class2->oop_is_instance()) {
-    classloader2 = InstanceKlass::cast(class2)->class_loader();
-  } else {
-    assert(class2->oop_is_typeArray(), "should be type array");
-    classloader2 = NULL;
-  }
+  oop classloader2 = class2->class_loader();
   Symbol* classname2 = class2->name();
 
-  return InstanceKlass::is_same_class_package(classloader1, classname1,
+  return InstanceKlass::is_same_class_package(class_loader(), name(),
                                               classloader2, classname2);
 }
 
 bool InstanceKlass::is_same_class_package(oop classloader2, Symbol* classname2) {
-  Klass* class1 = this;
-  oop classloader1 = InstanceKlass::cast(class1)->class_loader();
-  Symbol* classname1 = class1->name();
-
-  return InstanceKlass::is_same_class_package(classloader1, classname1,
+  return InstanceKlass::is_same_class_package(class_loader(), name(),
                                               classloader2, classname2);
 }
 
@@ -2385,7 +2398,7 @@
 // Assumes name-signature match
 // "this" is InstanceKlass of super_method which must exist
 // note that the InstanceKlass of the method in the targetclassname has not always been created yet
-bool InstanceKlass::is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) {
+bool InstanceKlass::is_override(const methodHandle& super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) {
    // Private methods can not be overridden
    if (super_method->is_private()) {
      return false;
@@ -2411,7 +2424,7 @@
 bool InstanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
                                                 Klass* class2_oop, TRAPS) {
   if (class2_oop == class1())                       return true;
-  if (!class2_oop->oop_is_instance())  return false;
+  if (!class2_oop->is_instance_klass())  return false;
   instanceKlassHandle class2(THREAD, class2_oop);
 
   // must be in same package before we try anything else
@@ -2882,7 +2895,8 @@
   ((InstanceKlass*)this)->do_local_static_fields(&print_static_field);
   st->print_cr(BULLET"---- non-static fields (%d words):", nonstatic_field_size());
   FieldPrinter print_nonstatic_field(st);
-  ((InstanceKlass*)this)->do_nonstatic_fields(&print_nonstatic_field);
+  InstanceKlass* ik = const_cast<InstanceKlass*>(this);
+  ik->do_nonstatic_fields(&print_nonstatic_field);
 
   st->print(BULLET"non-static oop maps: ");
   OopMapBlock* map     = start_of_nonstatic_oop_maps();
@@ -2953,7 +2967,7 @@
     st->print_cr(BULLET"fake entry for oop_size: %d", java_lang_Class::oop_size(obj));
     st->print_cr(BULLET"fake entry for static_oop_field_count: %d", java_lang_Class::static_oop_field_count(obj));
     Klass* real_klass = java_lang_Class::as_Klass(obj);
-    if (real_klass != NULL && real_klass->oop_is_instance()) {
+    if (real_klass != NULL && real_klass->is_instance_klass()) {
       InstanceKlass::cast(real_klass)->do_local_static_fields(&print_field);
     }
   } else if (this == SystemDictionary::MethodType_klass()) {
@@ -3546,3 +3560,199 @@
 unsigned char * InstanceKlass::get_cached_class_file_bytes() {
   return VM_RedefineClasses::get_cached_class_file_bytes(_cached_class_file);
 }
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestNmethodBucketContext {
+ public:
+  nmethod* _nmethodLast;
+  nmethod* _nmethodMiddle;
+  nmethod* _nmethodFirst;
+
+  nmethodBucket* _bucketLast;
+  nmethodBucket* _bucketMiddle;
+  nmethodBucket* _bucketFirst;
+
+  nmethodBucket* _bucketList;
+
+  TestNmethodBucketContext() {
+    CodeCache_lock->lock_without_safepoint_check();
+
+    _nmethodLast   = reinterpret_cast<nmethod*>(0x8 * 0);
+    _nmethodMiddle = reinterpret_cast<nmethod*>(0x8 * 1);
+    _nmethodFirst  = reinterpret_cast<nmethod*>(0x8 * 2);
+
+    _bucketLast   = new nmethodBucket(_nmethodLast,   NULL);
+    _bucketMiddle = new nmethodBucket(_nmethodMiddle, _bucketLast);
+    _bucketFirst  = new nmethodBucket(_nmethodFirst,   _bucketMiddle);
+
+    _bucketList = _bucketFirst;
+  }
+
+  ~TestNmethodBucketContext() {
+    delete _bucketLast;
+    delete _bucketMiddle;
+    delete _bucketFirst;
+
+    CodeCache_lock->unlock();
+  }
+};
+
+class TestNmethodBucket {
+ public:
+  static void testRemoveDependentNmethodFirstDeleteImmediately() {
+    TestNmethodBucketContext c;
+
+    nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodFirst, true /* delete */);
+
+    assert(c._bucketList == c._bucketMiddle, "check");
+    assert(c._bucketList->next() == c._bucketLast, "check");
+    assert(c._bucketList->next()->next() == NULL, "check");
+
+    // Cleanup before context is deleted.
+    c._bucketFirst = NULL;
+  }
+
+  static void testRemoveDependentNmethodMiddleDeleteImmediately() {
+    TestNmethodBucketContext c;
+
+    nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodMiddle, true /* delete */);
+
+    assert(c._bucketList == c._bucketFirst, "check");
+    assert(c._bucketList->next() == c._bucketLast, "check");
+    assert(c._bucketList->next()->next() == NULL, "check");
+
+    // Cleanup before context is deleted.
+    c._bucketMiddle = NULL;
+  }
+
+  static void testRemoveDependentNmethodLastDeleteImmediately() {
+    TestNmethodBucketContext c;
+
+    nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodLast, true /* delete */);
+
+    assert(c._bucketList == c._bucketFirst, "check");
+    assert(c._bucketList->next() == c._bucketMiddle, "check");
+    assert(c._bucketList->next()->next() == NULL, "check");
+
+    // Cleanup before context is deleted.
+    c._bucketLast = NULL;
+  }
+
+  static void testRemoveDependentNmethodFirstDeleteDeferred() {
+    TestNmethodBucketContext c;
+
+    nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodFirst, false /* delete */);
+
+    assert(c._bucketList                         == c._bucketFirst,  "check");
+    assert(c._bucketList->next()                 == c._bucketMiddle, "check");
+    assert(c._bucketList->next()->next()         == c._bucketLast,   "check");
+    assert(c._bucketList->next()->next()->next() == NULL,            "check");
+
+    assert(c._bucketFirst->count()  == 0, "check");
+    assert(c._bucketMiddle->count() == 1, "check");
+    assert(c._bucketLast->count()   == 1, "check");
+  }
+
+  static void testRemoveDependentNmethodMiddleDeleteDeferred() {
+    TestNmethodBucketContext c;
+
+    nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodMiddle, false /* delete */);
+
+    assert(c._bucketList                         == c._bucketFirst,  "check");
+    assert(c._bucketList->next()                 == c._bucketMiddle, "check");
+    assert(c._bucketList->next()->next()         == c._bucketLast,   "check");
+    assert(c._bucketList->next()->next()->next() == NULL,            "check");
+
+    assert(c._bucketFirst->count()  == 1, "check");
+    assert(c._bucketMiddle->count() == 0, "check");
+    assert(c._bucketLast->count()   == 1, "check");
+  }
+
+  static void testRemoveDependentNmethodLastDeleteDeferred() {
+    TestNmethodBucketContext c;
+
+    nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodLast, false /* delete */);
+
+    assert(c._bucketList                         == c._bucketFirst,  "check");
+    assert(c._bucketList->next()                 == c._bucketMiddle, "check");
+    assert(c._bucketList->next()->next()         == c._bucketLast,   "check");
+    assert(c._bucketList->next()->next()->next() == NULL,            "check");
+
+    assert(c._bucketFirst->count()  == 1, "check");
+    assert(c._bucketMiddle->count() == 1, "check");
+    assert(c._bucketLast->count()   == 0, "check");
+  }
+
+  static void testRemoveDependentNmethodConvenienceFirst() {
+    TestNmethodBucketContext c;
+
+    nmethodBucket::remove_dependent_nmethod(c._bucketList, c._nmethodFirst);
+
+    assert(c._bucketList                         == c._bucketFirst,  "check");
+    assert(c._bucketList->next()                 == c._bucketMiddle, "check");
+    assert(c._bucketList->next()->next()         == c._bucketLast,   "check");
+    assert(c._bucketList->next()->next()->next() == NULL,            "check");
+
+    assert(c._bucketFirst->count()  == 0, "check");
+    assert(c._bucketMiddle->count() == 1, "check");
+    assert(c._bucketLast->count()   == 1, "check");
+  }
+
+  static void testRemoveDependentNmethodConvenienceMiddle() {
+    TestNmethodBucketContext c;
+
+    nmethodBucket::remove_dependent_nmethod(c._bucketList, c._nmethodMiddle);
+
+    assert(c._bucketList                         == c._bucketFirst,  "check");
+    assert(c._bucketList->next()                 == c._bucketMiddle, "check");
+    assert(c._bucketList->next()->next()         == c._bucketLast,   "check");
+    assert(c._bucketList->next()->next()->next() == NULL,            "check");
+
+    assert(c._bucketFirst->count()  == 1, "check");
+    assert(c._bucketMiddle->count() == 0, "check");
+    assert(c._bucketLast->count()   == 1, "check");
+  }
+
+  static void testRemoveDependentNmethodConvenienceLast() {
+    TestNmethodBucketContext c;
+
+    nmethodBucket::remove_dependent_nmethod(c._bucketList, c._nmethodLast);
+
+    assert(c._bucketList                         == c._bucketFirst,  "check");
+    assert(c._bucketList->next()                 == c._bucketMiddle, "check");
+    assert(c._bucketList->next()->next()         == c._bucketLast,   "check");
+    assert(c._bucketList->next()->next()->next() == NULL,            "check");
+
+    assert(c._bucketFirst->count()  == 1, "check");
+    assert(c._bucketMiddle->count() == 1, "check");
+    assert(c._bucketLast->count()   == 0, "check");
+  }
+
+  static void testRemoveDependentNmethod() {
+    testRemoveDependentNmethodFirstDeleteImmediately();
+    testRemoveDependentNmethodMiddleDeleteImmediately();
+    testRemoveDependentNmethodLastDeleteImmediately();
+
+    testRemoveDependentNmethodFirstDeleteDeferred();
+    testRemoveDependentNmethodMiddleDeleteDeferred();
+    testRemoveDependentNmethodLastDeleteDeferred();
+
+    testRemoveDependentNmethodConvenienceFirst();
+    testRemoveDependentNmethodConvenienceMiddle();
+    testRemoveDependentNmethodConvenienceLast();
+  }
+
+  static void test() {
+    testRemoveDependentNmethod();
+  }
+};
+
+void TestNmethodBucket_test() {
+  TestNmethodBucket::test();
+}
+
+#endif
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -414,7 +414,7 @@
   };
 
   // method override check
-  bool is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS);
+  bool is_override(const methodHandle& super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS);
 
   // package
   bool is_same_class_package(Klass* class2);
@@ -780,7 +780,7 @@
 
   // jmethodID support
   static jmethodID get_jmethod_id(instanceKlassHandle ik_h,
-                     methodHandle method_h);
+                     const methodHandle& method_h);
   static jmethodID get_jmethod_id_fetch_or_update(instanceKlassHandle ik_h,
                      size_t idnum, jmethodID new_id, jmethodID* new_jmeths,
                      jmethodID* to_dealloc_id_p,
@@ -827,7 +827,7 @@
   // OopMapCache support
   OopMapCache* oop_map_cache()               { return _oop_map_cache; }
   void set_oop_map_cache(OopMapCache *cache) { _oop_map_cache = cache; }
-  void mask_for(methodHandle method, int bci, InterpreterOopMap* entry);
+  void mask_for(const methodHandle& method, int bci, InterpreterOopMap* entry);
 
   // JNI identifier support (for static fields - for jni performance)
   JNIid* jni_ids()                               { return _jni_ids; }
@@ -837,7 +837,7 @@
   // maintenance of deoptimization dependencies
   int mark_dependent_nmethods(DepChange& changes);
   void add_dependent_nmethod(nmethod* nm);
-  void remove_dependent_nmethod(nmethod* nm);
+  void remove_dependent_nmethod(nmethod* nm, bool delete_immediately);
 
   // On-stack replacement support
   nmethod* osr_nmethods_head() const         { return _osr_nmethods_head; };
@@ -862,7 +862,7 @@
 
 #ifdef ASSERT
   // check whether this class or one of its superclasses was redefined
-  bool has_redefined_this_or_super() const;
+  bool has_redefined_this_or_super();
 #endif
 
   // Access to the implementor of an interface.
@@ -908,7 +908,9 @@
   bool compute_is_subtype_of(Klass* k);
   bool can_be_primary_super_slow() const;
   int oop_size(oop obj)  const             { return size_helper(); }
-  bool oop_is_instance_slow() const        { return true; }
+  // slow because it's a virtual call and used for verifying the layout_helper.
+  // Using the layout_helper bits, we can call is_instance_klass without a virtual call.
+  DEBUG_ONLY(bool is_instance_klass_slow() const      { return true; })
 
   // Iterators
   void do_local_static_fields(FieldClosure* cl);
@@ -922,7 +924,8 @@
 
   // Casting from Klass*
   static InstanceKlass* cast(Klass* k) {
-    assert(k == NULL || k->oop_is_instance(), "cast to InstanceKlass");
+    assert(k != NULL, "k should not be null");
+    assert(k->is_instance_klass(), "cast to InstanceKlass");
     return static_cast<InstanceKlass*>(k);
   }
 
@@ -1021,6 +1024,7 @@
   void adjust_default_methods(InstanceKlass* holder, bool* trace_name_printed);
 #endif // INCLUDE_JVMTI
 
+  void clean_weak_instanceklass_links(BoolObjectClosure* is_alive);
   void clean_implementors_list(BoolObjectClosure* is_alive);
   void clean_method_data(BoolObjectClosure* is_alive);
   void clean_dependent_nmethods();
@@ -1349,6 +1353,7 @@
 
   static int mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes);
   static nmethodBucket* add_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
+  static bool remove_dependent_nmethod(nmethodBucket** deps, nmethod* nm, bool delete_immediately);
   static bool remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
   static nmethodBucket* clean_dependent_nmethods(nmethodBucket* deps);
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -40,7 +40,7 @@
 int InstanceMirrorKlass::_offset_of_static_fields = 0;
 
 int InstanceMirrorKlass::instance_size(KlassHandle k) {
-  if (k() != NULL && k->oop_is_instance()) {
+  if (k() != NULL && k->is_instance_klass()) {
     return align_object_size(size_helper() + InstanceKlass::cast(k())->static_field_size());
   }
   return size_helper();
@@ -65,7 +65,7 @@
 
 int InstanceMirrorKlass::compute_static_oop_field_count(oop obj) {
   Klass* k = java_lang_Class::as_Klass(obj);
-  if (k != NULL && k->oop_is_instance()) {
+  if (k != NULL && k->is_instance_klass()) {
     return InstanceKlass::cast(k)->static_oop_field_count();
   }
   return 0;
--- a/hotspot/src/share/vm/oops/instanceMirrorKlass.inline.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.inline.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -60,7 +60,7 @@
     Klass* klass = java_lang_Class::as_Klass(obj);
     // We'll get NULL for primitive mirrors.
     if (klass != NULL) {
-      if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
+      if (klass->is_instance_klass() && InstanceKlass::cast(klass)->is_anonymous()) {
         // An anonymous class doesn't have its own class loader, so when handling
         // the java mirror for an anonymous class we need to make sure its class
         // loader data is claimed, this is done by calling do_cld explicitly.
--- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -82,8 +82,9 @@
   // Verify next field
   oop next = java_lang_ref_Reference::next(obj);
   if (next != NULL) {
-    guarantee(next->is_oop(), "next field verify failed");
-    guarantee(next->is_instanceRef(), "next field verify failed");
+    guarantee(next->is_oop(), "next field should be an oop");
+    guarantee(next->is_instance(), "next field should be an instance");
+    guarantee(InstanceKlass::cast(next->klass())->is_reference_instance_klass(), "next field verify failed");
   }
 }
 
--- a/hotspot/src/share/vm/oops/klass.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/klass.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -348,7 +348,7 @@
 
 
 InstanceKlass* Klass::superklass() const {
-  assert(super() == NULL || super()->oop_is_instance(), "must be instance klass");
+  assert(super() == NULL || super()->is_instance_klass(), "must be instance klass");
   return _super == NULL ? NULL : InstanceKlass::cast(_super);
 }
 
@@ -440,10 +440,9 @@
     }
 
     // Clean the implementors list and method data.
-    if (clean_alive_klasses && current->oop_is_instance()) {
+    if (clean_alive_klasses && current->is_instance_klass()) {
       InstanceKlass* ik = InstanceKlass::cast(current);
-      ik->clean_implementors_list(is_alive);
-      ik->clean_method_data(is_alive);
+      ik->clean_weak_instanceklass_links(is_alive);
     }
   }
 }
@@ -558,9 +557,11 @@
 
 oop Klass::class_loader() const { return class_loader_data()->class_loader(); }
 
+// In product mode, this function doesn't have virtual function calls so
+// there might be some performance advantage to handling InstanceKlass here.
 const char* Klass::external_name() const {
-  if (oop_is_instance()) {
-    InstanceKlass* ik = (InstanceKlass*) this;
+  if (is_instance_klass()) {
+    const InstanceKlass* ik = static_cast<const InstanceKlass*>(this);
     if (ik->is_anonymous()) {
       intptr_t hash = 0;
       if (ik->java_mirror() != NULL) {
@@ -688,19 +689,13 @@
 #ifndef PRODUCT
 
 bool Klass::verify_vtable_index(int i) {
-  if (oop_is_instance()) {
-    int limit = ((InstanceKlass*)this)->vtable_length()/vtableEntry::size();
-    assert(i >= 0 && i < limit, "index %d out of bounds %d", i, limit);
-  } else {
-    assert(oop_is_array(), "Must be");
-    int limit = ((ArrayKlass*)this)->vtable_length()/vtableEntry::size();
-    assert(i >= 0 && i < limit, "index %d out of bounds %d", i, limit);
-  }
+  int limit = vtable_length()/vtableEntry::size();
+  assert(i >= 0 && i < limit, "index %d out of bounds %d", i, limit);
   return true;
 }
 
 bool Klass::verify_itable_index(int i) {
-  assert(oop_is_instance(), "");
+  assert(is_instance_klass(), "");
   int method_count = klassItable::method_count_for_interface(this);
   assert(i >= 0 && i < method_count, "index out of bounds");
   return true;
@@ -716,11 +711,11 @@
  public:
   static void test_oop_is_instanceClassLoader() {
     Klass* klass = SystemDictionary::ClassLoader_klass();
-    guarantee(klass->oop_is_instance(), "assert");
+    guarantee(klass->is_instance_klass(), "assert");
     guarantee(InstanceKlass::cast(klass)->is_class_loader_instance_klass(), "test failed");
 
     klass = SystemDictionary::String_klass();
-    guarantee(!klass->oop_is_instance() ||
+    guarantee(!klass->is_instance_klass() ||
               !InstanceKlass::cast(klass)->is_class_loader_instance_klass(),
               "test failed");
   }
@@ -730,4 +725,4 @@
   TestKlass::test_oop_is_instanceClassLoader();
 }
 
-#endif
+#endif  // PRODUCT
--- a/hotspot/src/share/vm/oops/klass.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/klass.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -373,8 +373,8 @@
 #endif
 
   // vtables
-  virtual klassVtable* vtable() const        { return NULL; }
-  virtual int vtable_length() const          { return 0; }
+  virtual klassVtable* vtable() const = 0;
+  virtual int vtable_length() const = 0;
 
   // subclass check
   bool is_subclass_of(const Klass* k) const;
@@ -474,11 +474,13 @@
   virtual const char* signature_name() const;
 
   // type testing operations
+#ifdef ASSERT
  protected:
-  virtual bool oop_is_instance_slow()       const { return false; }
-  virtual bool oop_is_array_slow()          const { return false; }
-  virtual bool oop_is_objArray_slow()       const { return false; }
-  virtual bool oop_is_typeArray_slow()      const { return false; }
+  virtual bool is_instance_klass_slow()     const { return false; }
+  virtual bool is_array_klass_slow()        const { return false; }
+  virtual bool is_objArray_klass_slow()     const { return false; }
+  virtual bool is_typeArray_klass_slow()    const { return false; }
+#endif // ASSERT
  public:
 
   // Fast non-virtual versions
@@ -492,18 +494,18 @@
   }
  public:
   #endif
-  inline  bool oop_is_instance()            const { return assert_same_query(
-                                                    layout_helper_is_instance(layout_helper()),
-                                                    oop_is_instance_slow()); }
-  inline  bool oop_is_array()               const { return assert_same_query(
+  inline  bool is_instance_klass()            const { return assert_same_query(
+                                                      layout_helper_is_instance(layout_helper()),
+                                                      is_instance_klass_slow()); }
+  inline  bool is_array_klass()               const { return assert_same_query(
                                                     layout_helper_is_array(layout_helper()),
-                                                    oop_is_array_slow()); }
-  inline  bool oop_is_objArray()            const { return assert_same_query(
+                                                    is_array_klass_slow()); }
+  inline  bool is_objArray_klass()            const { return assert_same_query(
                                                     layout_helper_is_objArray(layout_helper()),
-                                                    oop_is_objArray_slow()); }
-  inline  bool oop_is_typeArray()           const { return assert_same_query(
+                                                    is_objArray_klass_slow()); }
+  inline  bool is_typeArray_klass()           const { return assert_same_query(
                                                     layout_helper_is_typeArray(layout_helper()),
-                                                    oop_is_typeArray_slow()); }
+                                                    is_typeArray_klass_slow()); }
   #undef assert_same_query
 
   // Access flags
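
Editor's note: the klass.hpp hunk above converges on one pattern, a fast non-virtual type query that reads tag bits out of a cached layout word, with the virtual *_slow() counterpart kept only under ASSERT to cross-check it. A small self-contained sketch of that pattern, with invented names (Tagged, kInstanceBit), assuming NDEBUG plays the role of the product build:

    #include <cassert>
    #include <cstdint>

    class Tagged {
     public:
      static const uint32_t kInstanceBit = 1u;   // hypothetical tag bit

      explicit Tagged(uint32_t layout) : _layout(layout) {}
      virtual ~Tagged() {}

      // Fast, non-virtual query: a plain bit test on the cached layout word,
      // cross-checked against the virtual slow path in debug builds only.
      bool is_instance() const {
        bool fast = (_layout & kInstanceBit) != 0;
    #ifndef NDEBUG
        assert(fast == is_instance_slow());
    #endif
        return fast;
      }

     protected:
    #ifndef NDEBUG
      virtual bool is_instance_slow() const { return false; }
    #endif

     private:
      uint32_t _layout;
    };

    class InstanceTagged : public Tagged {
     public:
      InstanceTagged() : Tagged(kInstanceBit) {}
     protected:
    #ifndef NDEBUG
      bool is_instance_slow() const { return true; }
    #endif
    };

In product builds only the bit test remains, which is why the slow variants can be made pure debug-only (DEBUG_ONLY) and why vtable()/vtable_length() can become pure virtual without a product-mode cost.
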
--- a/hotspot/src/share/vm/oops/klass.inline.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/klass.inline.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 #include "oops/markOop.hpp"
 
 inline void Klass::set_prototype_header(markOop header) {
-  assert(!header->has_bias_pattern() || oop_is_instance(), "biased locking currently only supported for Java instances");
+  assert(!header->has_bias_pattern() || is_instance_klass(), "biased locking currently only supported for Java instances");
   _prototype_header = header;
 }
 
--- a/hotspot/src/share/vm/oops/klassVtable.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/klassVtable.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -39,9 +39,7 @@
 #include "utilities/copy.hpp"
 
 inline InstanceKlass* klassVtable::ik() const {
-  Klass* k = _klass();
-  assert(k->oop_is_instance(), "not an InstanceKlass");
-  return (InstanceKlass*)k;
+  return InstanceKlass::cast(_klass());
 }
 
 
@@ -66,8 +64,7 @@
   int vtable_length = 0;
 
   // start off with super's vtable length
-  InstanceKlass* sk = (InstanceKlass*)super;
-  vtable_length = super == NULL ? 0 : sk->vtable_length();
+  vtable_length = super == NULL ? 0 : super->vtable_length();
 
   // go thru each method in the methods table to see if it needs a new entry
   int len = methods->length();
@@ -131,10 +128,7 @@
     return 0;
   } else {
     // copy methods from superKlass
-    // can't inherit from array class, so must be InstanceKlass
-    assert(super->oop_is_instance(), "must be instance klass");
-    InstanceKlass* sk = (InstanceKlass*)super();
-    klassVtable* superVtable = sk->vtable();
+    klassVtable* superVtable = super->vtable();
     assert(superVtable->length() <= _length, "vtable too short");
 #ifdef ASSERT
     superVtable->verify(tty, true);
@@ -143,7 +137,7 @@
 #ifndef PRODUCT
     if (PrintVtables && Verbose) {
       ResourceMark rm;
-      tty->print_cr("copy vtable from %s to %s size %d", sk->internal_name(), klass()->internal_name(), _length);
+      tty->print_cr("copy vtable from %s to %s size %d", super->internal_name(), klass()->internal_name(), _length);
     }
 #endif
     return superVtable->length();
@@ -158,7 +152,7 @@
   KlassHandle super (THREAD, klass()->java_super());
   int nofNewEntries = 0;
 
-  if (PrintVtables && !klass()->oop_is_array()) {
+  if (PrintVtables && !klass()->is_array_klass()) {
     ResourceMark rm(THREAD);
     tty->print_cr("Initializing: %s", _klass->name()->as_C_string());
   }
@@ -176,10 +170,10 @@
   }
 
   int super_vtable_len = initialize_from_super(super);
-  if (klass()->oop_is_array()) {
+  if (klass()->is_array_klass()) {
     assert(super_vtable_len == _length, "arrays shouldn't introduce new methods");
   } else {
-    assert(_klass->oop_is_instance(), "must be InstanceKlass");
+    assert(_klass->is_instance_klass(), "must be InstanceKlass");
 
     Array<Method*>* methods = ik()->methods();
     int len = methods->length();
@@ -303,7 +297,7 @@
       break;
     }
     // if no override found yet, continue to search up
-    superk = InstanceKlass::cast(superk->super());
+    superk = superk->super() == NULL ? NULL : InstanceKlass::cast(superk->super());
   }
 
   return superk;
@@ -318,7 +312,7 @@
                                           bool checkconstraints, TRAPS) {
   ResourceMark rm;
   bool allocate_new = true;
-  assert(klass->oop_is_instance(), "must be InstanceKlass");
+  assert(klass->is_instance_klass(), "must be InstanceKlass");
 
   Array<int>* def_vtable_indices = NULL;
   bool is_default = false;
@@ -761,15 +755,14 @@
      return false;
    }
 
-  InstanceKlass* cursuper;
-  // Iterate on all superclasses, which should have instanceKlasses
+  // Iterate on all superclasses, which should be InstanceKlasses.
   // Note that we explicitly look for overpasses at each level.
   // Overpasses may or may not exist for supers for pass 1,
   // they should have been created for pass 2 and later.
 
-  for (cursuper = InstanceKlass::cast(super); cursuper != NULL;  cursuper = (InstanceKlass*)cursuper->super())
+  for (Klass* cursuper = super; cursuper != NULL; cursuper = cursuper->super())
   {
-     if (cursuper->find_local_method(name, signature,
+     if (InstanceKlass::cast(cursuper)->find_local_method(name, signature,
            Klass::find_overpass, Klass::skip_static, Klass::skip_private) != NULL) {
        return false;
      }
@@ -1117,7 +1110,7 @@
 }
 
 int klassItable::method_count_for_interface(Klass* interf) {
-  assert(interf->oop_is_instance(), "must be");
+  assert(interf->is_instance_klass(), "must be");
   assert(interf->is_interface(), "must be");
   Array<Method*>* methods = InstanceKlass::cast(interf)->methods();
   int nof_methods = methods->length();
@@ -1534,11 +1527,11 @@
     klassVtable* vt = kl->vtable();
     if (vt == NULL) return;
     no_klasses++;
-    if (kl->oop_is_instance()) {
+    if (kl->is_instance_klass()) {
       no_instance_klasses++;
       kl->array_klasses_do(do_class);
     }
-    if (kl->oop_is_array()) {
+    if (kl->is_array_klass()) {
       no_array_klasses++;
       sum_of_array_vtable_len += vt->length();
     }
--- a/hotspot/src/share/vm/oops/method.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/method.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -299,10 +299,7 @@
 
 
 Symbol* Method::klass_name() const {
-  Klass* k = method_holder();
-  assert(k->is_klass(), "must be klass");
-  InstanceKlass* ik = (InstanceKlass*) k;
-  return ik->name();
+  return method_holder()->name();
 }
 
 
@@ -366,7 +363,7 @@
 
 // Build a MethodData* object to hold information about this method
 // collected in the interpreter.
-void Method::build_interpreter_method_data(methodHandle method, TRAPS) {
+void Method::build_interpreter_method_data(const methodHandle& method, TRAPS) {
   // Do not profile the method if metaspace has hit an OOM previously
   // allocating profiling data. Callers clear pending exception so don't
   // add one here.
@@ -897,7 +894,7 @@
 
 // Called when the method_holder is getting linked. Setup entrypoints so the method
 // is ready to be called from interpreter, compiler, and vtables.
-void Method::link_method(methodHandle h_method, TRAPS) {
+void Method::link_method(const methodHandle& h_method, TRAPS) {
   // If the code cache is full, we may reenter this function for the
   // leftover methods that weren't linked.
   if (_i2i_entry != NULL) return;
@@ -1178,7 +1175,7 @@
 
 Klass* Method::check_non_bcp_klass(Klass* klass) {
   if (klass != NULL && klass->class_loader() != NULL) {
-    if (klass->oop_is_objArray())
+    if (klass->is_objArray_klass())
       klass = ObjArrayKlass::cast(klass)->bottom_klass();
     return klass;
   }
@@ -1305,6 +1302,73 @@
   return vmSymbols::find_sid(klass_name);
 }
 
+static bool is_unsafe_alias(vmSymbols::SID name_id) {
+  // All 70 intrinsic candidate methods from sun.misc.Unsafe in 1.8.
+  // Some have the same method name but different signature, e.g.
+  // getByte(long), getByte(Object,long)
+  switch (name_id) {
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(allocateInstance_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(copyMemory_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(loadFence_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(storeFence_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(fullFence_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getObject_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getBoolean_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getByte_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getShort_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getChar_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getInt_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getLong_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getFloat_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getDouble_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putObject_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putBoolean_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putByte_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putShort_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putChar_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putInt_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putLong_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putFloat_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putDouble_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getObjectVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getBooleanVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getByteVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getShortVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getCharVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getIntVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getLongVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getFloatVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getDoubleVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putObjectVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putBooleanVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putByteVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putShortVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putCharVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putIntVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putLongVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putFloatVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putDoubleVolatile_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getAddress_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putAddress_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(compareAndSwapObject_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(compareAndSwapLong_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(compareAndSwapInt_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putOrderedObject_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putOrderedLong_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(putOrderedInt_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndAddInt_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndAddLong_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndSetInt_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndSetLong_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndSetObject_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(park_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(unpark_name):
+      return true;
+  }
+
+  return false;
+}
+
 void Method::init_intrinsic_id() {
   assert(_intrinsic_id == vmIntrinsics::_none, "do this just once");
   const uintptr_t max_id_uint = right_n_bits((int)(sizeof(_intrinsic_id) * BitsPerByte));
@@ -1357,6 +1421,14 @@
     if (is_static() != MethodHandles::is_signature_polymorphic_static(id))
       id = vmIntrinsics::_none;
     break;
+
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_misc_Unsafe):
+    // Map sun.misc.Unsafe to jdk.internal.misc.Unsafe
+    if (!is_unsafe_alias(name_id))  break;
+    // pretend it is the corresponding method in the internal Unsafe class:
+    klass_id = vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_misc_Unsafe);
+    id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
+    break;
   }
 
   if (id != vmIntrinsics::_none) {
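
Editor's note: the method.cpp hunk above recognizes a fixed set of method names on the legacy sun.misc.Unsafe class and re-resolves them as if they were declared on jdk.internal.misc.Unsafe. A rough standalone illustration of that aliasing idea; the table contents and the lookup_intrinsic stub are invented for the sketch, not the VM's own tables.

    #include <string>
    #include <unordered_set>

    // Stand-in for the VM's intrinsic table: returns a fake id (0 == none).
    static int lookup_intrinsic(const std::string& klass, const std::string& name) {
      return (klass == "jdk/internal/misc/Unsafe" && name == "getInt") ? 42 : 0;
    }

    static bool is_legacy_unsafe_alias(const std::string& name) {
      // Abbreviated alias set; the real list covers every intrinsic candidate.
      static const std::unordered_set<std::string> aliases = {
        "getInt", "putInt", "compareAndSwapInt", "park", "unpark"
      };
      return aliases.count(name) != 0;
    }

    static int intrinsic_for(const std::string& klass, const std::string& name) {
      if (klass == "sun/misc/Unsafe" && is_legacy_unsafe_alias(name)) {
        // Resolve the method as if it were declared on the internal class.
        return lookup_intrinsic("jdk/internal/misc/Unsafe", name);
      }
      return lookup_intrinsic(klass, name);
    }
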
--- a/hotspot/src/share/vm/oops/method.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/method.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -393,7 +393,7 @@
   bool was_executed_more_than(int n);
   bool was_never_executed()                      { return !was_executed_more_than(0); }
 
-  static void build_interpreter_method_data(methodHandle method, TRAPS);
+  static void build_interpreter_method_data(const methodHandle& method, TRAPS);
 
   static MethodCounters* build_method_counters(Method* m, TRAPS);
 
@@ -435,7 +435,7 @@
   address get_c2i_unverified_entry();
   AdapterHandlerEntry* adapter() {  return _adapter; }
   // setup entry points
-  void link_method(methodHandle method, TRAPS);
+  void link_method(const methodHandle& method, TRAPS);
   // clear entry points. Used by sharing code
   void unlink_method();
 
--- a/hotspot/src/share/vm/oops/methodData.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/methodData.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -708,7 +708,7 @@
 // A MethodData* holds information which has been collected about
 // a method.
 
-MethodData* MethodData::allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS) {
+MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
   int size = MethodData::compute_allocation_size_in_words(method);
 
   return new (loader_data, size, false, MetaspaceObj::MethodDataType, THREAD)
@@ -898,7 +898,7 @@
 
 // Compute the size of the MethodData* necessary to store
 // profiling information about a given method.  Size is in bytes.
-int MethodData::compute_allocation_size_in_bytes(methodHandle method) {
+int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
   int data_size = 0;
   BytecodeStream stream(method);
   Bytecodes::Code c;
@@ -931,7 +931,7 @@
 
 // Compute the size of the MethodData* necessary to store
 // profiling information about a given method.  Size is in words
-int MethodData::compute_allocation_size_in_words(methodHandle method) {
+int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
   int byte_size = compute_allocation_size_in_bytes(method);
   int word_size = align_size_up(byte_size, BytesPerWord) / BytesPerWord;
   return align_object_size(word_size);
@@ -1129,7 +1129,7 @@
 }
 
 // Initialize the MethodData* corresponding to a given method.
-MethodData::MethodData(methodHandle method, int size, TRAPS)
+MethodData::MethodData(const methodHandle& method, int size, TRAPS)
   : _extra_data_lock(Monitor::leaf, "MDO extra data lock"),
     _parameters_type_data_di(parameters_uninitialized) {
   // Set the method back-pointer.
@@ -1513,7 +1513,7 @@
   // not yet implemented.
 }
 
-bool MethodData::profile_jsr292(methodHandle m, int bci) {
+bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
   if (m->is_compiled_lambda_form()) {
     return true;
   }
@@ -1538,7 +1538,7 @@
   return profile_arguments_flag() == type_profile_all;
 }
 
-bool MethodData::profile_arguments_for_invoke(methodHandle m, int bci) {
+bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
   if (!profile_arguments()) {
     return false;
   }
@@ -1567,7 +1567,7 @@
   return profile_return_flag() == type_profile_all;
 }
 
-bool MethodData::profile_return_for_invoke(methodHandle m, int bci) {
+bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
   if (!profile_return()) {
     return false;
   }
@@ -1596,7 +1596,7 @@
   return profile_parameters_flag() == type_profile_all;
 }
 
-bool MethodData::profile_parameters_for_method(methodHandle m) {
+bool MethodData::profile_parameters_for_method(const methodHandle& m) {
   if (!profile_parameters()) {
     return false;
   }
--- a/hotspot/src/share/vm/oops/methodData.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/methodData.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2145,9 +2145,9 @@
 
   Mutex _extra_data_lock;
 
-  MethodData(methodHandle method, int size, TRAPS);
+  MethodData(const methodHandle& method, int size, TRAPS);
 public:
-  static MethodData* allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS);
+  static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);
   MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {}; // For ciMethodData
 
   bool is_methodData() const volatile { return true; }
@@ -2283,13 +2283,13 @@
     type_profile_all = 2
   };
 
-  static bool profile_jsr292(methodHandle m, int bci);
+  static bool profile_jsr292(const methodHandle& m, int bci);
   static int profile_arguments_flag();
   static bool profile_all_arguments();
-  static bool profile_arguments_for_invoke(methodHandle m, int bci);
+  static bool profile_arguments_for_invoke(const methodHandle& m, int bci);
   static int profile_return_flag();
   static bool profile_all_return();
-  static bool profile_return_for_invoke(methodHandle m, int bci);
+  static bool profile_return_for_invoke(const methodHandle& m, int bci);
   static int profile_parameters_flag();
   static bool profile_parameters_jsr292_only();
   static bool profile_all_parameters();
@@ -2304,8 +2304,8 @@
   }
 
   // Compute the size of a MethodData* before it is created.
-  static int compute_allocation_size_in_bytes(methodHandle method);
-  static int compute_allocation_size_in_words(methodHandle method);
+  static int compute_allocation_size_in_bytes(const methodHandle& method);
+  static int compute_allocation_size_in_words(const methodHandle& method);
   static int compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps);
 
   // Determine if a given bytecode can have profile information.
@@ -2589,7 +2589,7 @@
   void verify_on(outputStream* st);
   void verify_data_on(outputStream* st);
 
-  static bool profile_parameters_for_method(methodHandle m);
+  static bool profile_parameters_for_method(const methodHandle& m);
   static bool profile_arguments();
   static bool profile_arguments_jsr292_only();
   static bool profile_return();
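
Editor's note: a recurring change in the method.cpp/method.hpp/methodData.* hunks above is turning methodHandle parameters into const methodHandle&. The point is simply to avoid constructing a handle copy on every call; a toy sketch with a hypothetical copy-counting Handle type shows the difference.

    #include <cstdio>

    // Toy handle type that counts copies, standing in for methodHandle.
    struct Handle {
      static int copies;
      Handle() {}
      Handle(const Handle&) { ++copies; }
    };
    int Handle::copies = 0;

    static void by_value(Handle h)            { (void)h; }
    static void by_const_ref(const Handle& h) { (void)h; }

    int main() {
      Handle h;
      by_value(h);       // one copy constructed
      by_const_ref(h);   // no copy
      std::printf("copies made: %d\n", Handle::copies);  // prints 1
      return 0;
    }
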
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -102,7 +102,7 @@
 
   // Create type name for klass.
   Symbol* name = NULL;
-  if (!element_klass->oop_is_instance() ||
+  if (!element_klass->is_instance_klass() ||
       (name = InstanceKlass::cast(element_klass())->array_name()) == NULL) {
 
     ResourceMark rm(THREAD);
@@ -111,17 +111,17 @@
     char *new_str = NEW_RESOURCE_ARRAY(char, len + 4);
     int idx = 0;
     new_str[idx++] = '[';
-    if (element_klass->oop_is_instance()) { // it could be an array or simple type
+    if (element_klass->is_instance_klass()) { // it could be an array or simple type
       new_str[idx++] = 'L';
     }
     memcpy(&new_str[idx], name_str, len * sizeof(char));
     idx += len;
-    if (element_klass->oop_is_instance()) {
+    if (element_klass->is_instance_klass()) {
       new_str[idx++] = ';';
     }
     new_str[idx++] = '\0';
     name = SymbolTable::new_permanent_symbol(new_str, CHECK_0);
-    if (element_klass->oop_is_instance()) {
+    if (element_klass->is_instance_klass()) {
       InstanceKlass* ik = InstanceKlass::cast(element_klass());
       ik->set_array_name(name);
     }
@@ -150,18 +150,18 @@
   name->decrement_refcount();
 
   Klass* bk;
-  if (element_klass->oop_is_objArray()) {
+  if (element_klass->is_objArray_klass()) {
     bk = ObjArrayKlass::cast(element_klass())->bottom_klass();
   } else {
     bk = element_klass();
   }
-  assert(bk != NULL && (bk->oop_is_instance() || bk->oop_is_typeArray()), "invalid bottom klass");
+  assert(bk != NULL && (bk->is_instance_klass() || bk->is_typeArray_klass()), "invalid bottom klass");
   this->set_bottom_klass(bk);
   this->set_class_loader_data(bk->class_loader_data());
 
   this->set_layout_helper(array_layout_helper(T_OBJECT));
-  assert(this->oop_is_array(), "sanity");
-  assert(this->oop_is_objArray(), "sanity");
+  assert(this->is_array_klass(), "sanity");
+  assert(this->is_objArray_klass(), "sanity");
 }
 
 int ObjArrayKlass::oop_size(oop obj) const {
@@ -336,7 +336,7 @@
         ak->set_lower_dimension(this);
         OrderAccess::storestore();
         set_higher_dimension(ak);
-        assert(ak->oop_is_objArray(), "incorrect initialization of ObjArrayKlass");
+        assert(ak->is_objArray_klass(), "incorrect initialization of ObjArrayKlass");
       }
     }
   } else {
@@ -386,7 +386,7 @@
 }
 
 bool ObjArrayKlass::compute_is_subtype_of(Klass* k) {
-  if (!k->oop_is_objArray())
+  if (!k->is_objArray_klass())
     return ArrayKlass::compute_is_subtype_of(k);
 
   ObjArrayKlass* oak = ObjArrayKlass::cast(k);
@@ -484,7 +484,7 @@
   guarantee(element_klass()->is_klass(), "should be klass");
   guarantee(bottom_klass()->is_klass(), "should be klass");
   Klass* bk = bottom_klass();
-  guarantee(bk->oop_is_instance() || bk->oop_is_typeArray(),  "invalid bottom klass");
+  guarantee(bk->is_instance_klass() || bk->is_typeArray_klass(),  "invalid bottom klass");
 }
 
 void ObjArrayKlass::oop_verify_on(oop obj, outputStream* st) {
--- a/hotspot/src/share/vm/oops/objArrayKlass.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
   bool can_be_primary_super_slow() const;
   GrowableArray<Klass*>* compute_secondary_supers(int num_extra_slots);
   bool compute_is_subtype_of(Klass* k);
-  bool oop_is_objArray_slow()  const  { return true; }
+  DEBUG_ONLY(bool is_objArray_klass_slow()  const  { return true; })
   int oop_size(oop obj) const;
 
   // Allocation
@@ -91,8 +91,8 @@
  public:
   // Casting from Klass*
   static ObjArrayKlass* cast(Klass* k) {
-    assert(k->oop_is_objArray(), "cast to ObjArrayKlass");
-    return (ObjArrayKlass*) k;
+    assert(k->is_objArray_klass(), "cast to ObjArrayKlass");
+    return static_cast<ObjArrayKlass*>(k);
   }
 
   // Sizing
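
Editor's note: the cast() changes above (here and in typeArrayKlass.hpp) keep the debug-mode type assertion but swap the C-style cast for static_cast, so the downcast is still checked by the compiler against the class hierarchy. A generic sketch of that checked-downcast helper, with invented Base/Derived names:

    #include <cassert>
    #include <cstddef>

    struct Base    { virtual ~Base() {} virtual bool is_derived() const { return false; } };
    struct Derived : Base { bool is_derived() const { return true; } };

    // Assert the tag first, then use static_cast (not a C-style cast) so an
    // unrelated pointer type would be rejected at compile time.
    static Derived* cast_to_derived(Base* b) {
      assert(b != NULL && b->is_derived() && "cast to Derived");
      return static_cast<Derived*>(b);
    }
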
--- a/hotspot/src/share/vm/oops/oop.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/oop.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -129,9 +129,6 @@
 
 // type test operations that doesn't require inclusion of oop.inline.hpp.
 bool oopDesc::is_instance_noinline()          const { return is_instance();            }
-bool oopDesc::is_instanceMirror_noinline()    const { return is_instanceMirror();      }
-bool oopDesc::is_instanceClassLoader_noline() const { return is_instanceClassLoader(); }
-bool oopDesc::is_instanceRef_noline()         const { return is_instanceRef();         }
 bool oopDesc::is_array_noinline()             const { return is_array();               }
 bool oopDesc::is_objArray_noinline()          const { return is_objArray();            }
 bool oopDesc::is_typeArray_noinline()         const { return is_typeArray();           }
--- a/hotspot/src/share/vm/oops/oop.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/oop.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -110,18 +110,12 @@
 
   // type test operations (inlined in oop.inline.hpp)
   bool is_instance()            const;
-  bool is_instanceMirror()      const;
-  bool is_instanceClassLoader() const;
-  bool is_instanceRef()         const;
   bool is_array()               const;
   bool is_objArray()            const;
   bool is_typeArray()           const;
 
   // type test operations that don't require inclusion of oop.inline.hpp.
   bool is_instance_noinline()          const;
-  bool is_instanceMirror_noinline()    const;
-  bool is_instanceClassLoader_noline() const;
-  bool is_instanceRef_noline()         const;
   bool is_array_noinline()             const;
   bool is_objArray_noinline()          const;
   bool is_typeArray_noinline()         const;
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -127,27 +127,12 @@
 inline bool oopDesc::is_a(Klass* k)        const { return klass()->is_subtype_of(k); }
 
 inline bool oopDesc::is_instance() const {
-  return klass()->oop_is_instance();
-}
-
-inline bool oopDesc::is_instanceClassLoader() const {
-  Klass* k = klass();
-  return k->oop_is_instance() && InstanceKlass::cast(k)->is_class_loader_instance_klass();
+  return klass()->is_instance_klass();
 }
 
-inline bool oopDesc::is_instanceMirror() const {
-  Klass* k = klass();
-  return k->oop_is_instance() && InstanceKlass::cast(k)->is_mirror_instance_klass();
-}
-
-inline bool oopDesc::is_instanceRef() const {
-  Klass* k = klass();
-  return k->oop_is_instance() && InstanceKlass::cast(k)->is_reference_instance_klass();
-}
-
-inline bool oopDesc::is_array()               const { return klass()->oop_is_array(); }
-inline bool oopDesc::is_objArray()            const { return klass()->oop_is_objArray(); }
-inline bool oopDesc::is_typeArray()           const { return klass()->oop_is_typeArray(); }
+inline bool oopDesc::is_array()               const { return klass()->is_array_klass(); }
+inline bool oopDesc::is_objArray()            const { return klass()->is_objArray_klass(); }
+inline bool oopDesc::is_typeArray()           const { return klass()->is_typeArray_klass(); }
 
 inline void*     oopDesc::field_base(int offset)        const { return (void*)&((char*)this)[offset]; }
 
@@ -724,7 +709,7 @@
 
 inline void oopDesc::pc_update_contents() {
   Klass* k = klass();
-  if (!k->oop_is_typeArray()) {
+  if (!k->is_typeArray_klass()) {
     // It might contain oops beyond the header, so take the virtual call.
     k->oop_pc_update_pointers(this);
   }
@@ -733,7 +718,7 @@
 
 inline void oopDesc::ps_push_contents(PSPromotionManager* pm) {
   Klass* k = klass();
-  if (!k->oop_is_typeArray()) {
+  if (!k->is_typeArray_klass()) {
     // It might contain oops beyond the header, so take the virtual call.
     k->oop_ps_push_contents(this, pm);
   }
--- a/hotspot/src/share/vm/oops/typeArrayKlass.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -43,7 +43,7 @@
 #include "utilities/macros.hpp"
 
 bool TypeArrayKlass::compute_is_subtype_of(Klass* k) {
-  if (!k->oop_is_typeArray()) {
+  if (!k->is_typeArray_klass()) {
     return ArrayKlass::compute_is_subtype_of(k);
   }
 
@@ -86,8 +86,8 @@
 
 TypeArrayKlass::TypeArrayKlass(BasicType type, Symbol* name) : ArrayKlass(name) {
   set_layout_helper(array_layout_helper(type));
-  assert(oop_is_array(), "sanity");
-  assert(oop_is_typeArray(), "sanity");
+  assert(is_array_klass(), "sanity");
+  assert(is_typeArray_klass(), "sanity");
 
   set_max_length(arrayOopDesc::max_array_length(type));
   assert(size() >= TypeArrayKlass::header_size(), "bad size");
@@ -181,7 +181,7 @@
         h_ak->set_lower_dimension(this);
         OrderAccess::storestore();
         set_higher_dimension(h_ak);
-        assert(h_ak->oop_is_objArray(), "incorrect initialization of ObjArrayKlass");
+        assert(h_ak->is_objArray_klass(), "incorrect initialization of ObjArrayKlass");
       }
     }
   } else {
--- a/hotspot/src/share/vm/oops/typeArrayKlass.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@
   void set_max_length(jint m)           { _max_length = m;    }
 
   // testers
-  bool oop_is_typeArray_slow() const    { return true; }
+  DEBUG_ONLY(bool is_typeArray_klass_slow() const  { return true; })
 
   // klass allocation
   static TypeArrayKlass* create_klass(BasicType type, const char* name_str,
@@ -122,8 +122,8 @@
  public:
   // Casting from Klass*
   static TypeArrayKlass* cast(Klass* k) {
-    assert(k->oop_is_typeArray(), "cast to TypeArrayKlass");
-    return (TypeArrayKlass*) k;
+    assert(k->is_typeArray_klass(), "cast to TypeArrayKlass");
+    return static_cast<TypeArrayKlass*>(k);
   }
 
   // Naming
--- a/hotspot/src/share/vm/opto/c2compiler.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/c2compiler.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -157,7 +157,7 @@
   Compile::print_timers();
 }
 
-bool C2Compiler::is_intrinsic_supported(methodHandle method, bool is_virtual) {
+bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virtual) {
   vmIntrinsics::ID id = method->intrinsic_id();
   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
 
--- a/hotspot/src/share/vm/opto/c2compiler.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/c2compiler.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,13 +56,13 @@
   // possible for only a limited set of available intrinsics whereas
   // a non-virtual dispatch is possible for all available intrinsics.)
   // Return false otherwise.
-  virtual bool is_intrinsic_supported(methodHandle method) {
+  virtual bool is_intrinsic_supported(const methodHandle& method) {
     return is_intrinsic_supported(method, false);
   }
 
   // Check if the compiler supports an intrinsic for 'method' given the
   // the dispatch mode specified by the 'is_virtual' parameter.
-  virtual bool is_intrinsic_supported(methodHandle method, bool is_virtual);
+  virtual bool is_intrinsic_supported(const methodHandle& method, bool is_virtual);
 
   // Initial size of the code buffer (may be increased at runtime)
   static int initial_code_buffer_size();
--- a/hotspot/src/share/vm/opto/callGenerator.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -671,7 +671,7 @@
                                            &exact_receiver);
 
   SafePointNode* slow_map = NULL;
-  JVMState* slow_jvms;
+  JVMState* slow_jvms = NULL;
   { PreserveJVMState pjvms(&kit);
     kit.set_control(slow_ctl);
     if (!kit.stopped()) {
--- a/hotspot/src/share/vm/opto/compile.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -3757,7 +3757,7 @@
   MacroAssembler _masm(&cb);
   for (int i = 0; i < _constants.length(); i++) {
     Constant con = _constants.at(i);
-    address constant_addr;
+    address constant_addr = NULL;
     switch (con.type()) {
     case T_LONG:   constant_addr = _masm.long_constant(  con.get_jlong()  ); break;
     case T_FLOAT:  constant_addr = _masm.float_constant( con.get_jfloat() ); break;
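
Editor's note: constant_addr = NULL above is one instance of a pattern repeated throughout this changeset (save_policy, Node* n, stubAddr, and so on), presumably to keep locals that are only assigned inside a switch from tripping maybe-uninitialized warnings in stricter builds. A minimal sketch of the shape, with invented names:

    #include <cassert>
    #include <cstddef>

    enum Kind { KIND_A, KIND_B };

    static const char* describe(Kind k) {
      // Initialize up front: if a new Kind is added without a case below,
      // a warnings-as-errors build would otherwise flag the read at return.
      const char* name = NULL;
      switch (k) {
        case KIND_A: name = "a"; break;
        case KIND_B: name = "b"; break;
        default:     assert(false && "unknown kind"); break;
      }
      return name;
    }
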
--- a/hotspot/src/share/vm/opto/compile.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/compile.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -89,7 +89,7 @@
 typedef unsigned int node_idx_t;
 class NodeCloneInfo {
  private:
-  uint64_t  _idx_clone_orig;
+  uint64_t _idx_clone_orig;
  public:
 
   void set_idx(node_idx_t idx) {
@@ -98,17 +98,17 @@
   node_idx_t idx() const { return (node_idx_t)(_idx_clone_orig & 0xFFFFFFFF); }
 
   void set_gen(int generation) {
-    uint64_t  g = (uint64_t)generation << 32;
+    uint64_t g = (uint64_t)generation << 32;
     _idx_clone_orig = _idx_clone_orig & 0xFFFFFFFF | g;
   }
   int gen() const { return (int)(_idx_clone_orig >> 32); }
 
-  void set(uint64_t x) {  _idx_clone_orig = x; }
-  void set(node_idx_t x, int g) {  set_idx(x); set_gen(g); }
+  void set(uint64_t x) { _idx_clone_orig = x; }
+  void set(node_idx_t x, int g) { set_idx(x); set_gen(g); }
   uint64_t get() const { return _idx_clone_orig; }
 
   NodeCloneInfo(uint64_t idx_clone_orig) : _idx_clone_orig(idx_clone_orig) {}
-  NodeCloneInfo(node_idx_t x, int g) {set(x, g);}
+  NodeCloneInfo(node_idx_t x, int g) : _idx_clone_orig(0) { set(x, g); }
 
   void dump() const;
 };
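
Editor's note: NodeCloneInfo above packs a 32-bit node index into the low half and a clone generation into the high half of one uint64_t, and the new constructor zeroes the word before calling set() so no stale bits survive the two partial updates. A standalone sketch of the same packing, with invented names (PackedCloneInfo, _bits):

    #include <cassert>
    #include <cstdint>

    class PackedCloneInfo {
     public:
      PackedCloneInfo() : _bits(0) {}
      PackedCloneInfo(uint32_t idx, int gen) : _bits(0) { set(idx, gen); }  // known start value

      void set_idx(uint32_t idx) {                 // replace the low 32 bits
        _bits = (_bits & ~uint64_t(0xFFFFFFFF)) | idx;
      }
      void set_gen(int gen) {                      // replace the high 32 bits
        _bits = (_bits & uint64_t(0xFFFFFFFF)) | (uint64_t(uint32_t(gen)) << 32);
      }
      void set(uint32_t idx, int gen) { set_idx(idx); set_gen(gen); }

      uint32_t idx() const { return uint32_t(_bits & 0xFFFFFFFF); }
      int      gen() const { return int(_bits >> 32); }

     private:
      uint64_t _bits;
    };

    int main() {
      PackedCloneInfo info(7, 3);
      assert(info.idx() == 7 && info.gen() == 3);
      return 0;
    }
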
--- a/hotspot/src/share/vm/opto/generateOptoStub.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/generateOptoStub.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -261,7 +261,7 @@
 
   //-----------------------------
   // If this is a normal subroutine return, issue the return and be done.
-  Node *ret;
+  Node *ret = NULL;
   switch( is_fancy_jump ) {
   case 0:                       // Make a return instruction
     // Return to caller, free any space for return address
--- a/hotspot/src/share/vm/opto/lcm.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -806,7 +806,7 @@
   block->insert_node(proj, node_cnt++);
 
   // Select the right register save policy.
-  const char * save_policy;
+  const char *save_policy = NULL;
   switch (op) {
     case Op_CallRuntime:
     case Op_CallLeaf:
--- a/hotspot/src/share/vm/opto/library_call.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1364,7 +1364,7 @@
 // public static double Math.log10(double)
 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
   Node* arg = round_double_node(argument(0));
-  Node* n;
+  Node* n = NULL;
   switch (id) {
   case vmIntrinsics::_dabs:   n = new AbsDNode(                arg);  break;
   case vmIntrinsics::_dsqrt:  n = new SqrtDNode(C, control(),  arg);  break;
@@ -2108,7 +2108,7 @@
 // inline long       Long.reverseBytes(long)
 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
   Node* arg = argument(0);
-  Node* n;
+  Node* n = NULL;
   switch (id) {
   case vmIntrinsics::_numberOfLeadingZeros_i:   n = new CountLeadingZerosINode( arg);  break;
   case vmIntrinsics::_numberOfLeadingZeros_l:   n = new CountLeadingZerosLNode( arg);  break;
@@ -2648,7 +2648,7 @@
 
   // For now, we handle only those cases that actually exist: ints,
   // longs, and Object. Adding others should be straightforward.
-  Node* load_store;
+  Node* load_store = NULL;
   switch(type) {
   case T_INT:
     if (kind == LS_xadd) {
@@ -2779,9 +2779,9 @@
 }
 
 //----------------------------inline_unsafe_ordered_store----------------------
-// public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
-// public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
-// public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
+// public native void Unsafe.putOrderedObject(Object o, long offset, Object x);
+// public native void Unsafe.putOrderedInt(Object o, long offset, int x);
+// public native void Unsafe.putOrderedLong(Object o, long offset, long x);
 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
   // This is another variant of inline_unsafe_access, differing in
   // that it always issues store-store ("release") barrier and ensures
@@ -2875,7 +2875,7 @@
 }
 
 //----------------------------inline_unsafe_allocate---------------------------
-// public native Object sun.misc.Unsafe.allocateInstance(Class<?> cls);
+// public native Object Unsafe.allocateInstance(Class<?> cls);
 bool LibraryCallKit::inline_unsafe_allocate() {
   if (callee()->is_static())  return false;  // caller must have the capability!
 
@@ -3654,7 +3654,7 @@
   Node* end               = is_copyOfRange? argument(2): argument(1);
   Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
 
-  Node* newcopy;
+  Node* newcopy = NULL;
 
   // Set the original stack and the reexecute bit for the interpreter to reexecute
   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
@@ -4089,7 +4089,7 @@
 
 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
   Node* arg = argument(0);
-  Node* result;
+  Node* result = NULL;
 
   switch (id) {
   case vmIntrinsics::_floatToRawIntBits:    result = new MoveF2INode(arg);  break;
@@ -4194,7 +4194,7 @@
 #endif //_LP64
 
 //----------------------inline_unsafe_copyMemory-------------------------
-// public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
+// public native void Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
 bool LibraryCallKit::inline_unsafe_copyMemory() {
   if (callee()->is_static())  return false;  // caller must have the capability!
   null_check_receiver();  // null-check receiver
@@ -5718,7 +5718,7 @@
 
 //------------------------------inline_aescrypt_Block-----------------------
 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
-  address stubAddr;
+  address stubAddr = NULL;
   const char *stubName;
   assert(UseAES, "need AES instruction support");
 
@@ -5784,8 +5784,8 @@
 
 //------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
 bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
-  address stubAddr;
-  const char *stubName;
+  address stubAddr = NULL;
+  const char *stubName = NULL;
 
   assert(UseAES, "need AES instruction support");
 
--- a/hotspot/src/share/vm/opto/macro.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/macro.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -779,10 +779,10 @@
   ciKlass* klass = NULL;
   ciInstanceKlass* iklass = NULL;
   int nfields = 0;
-  int array_base;
-  int element_size;
-  BasicType basic_elem_type;
-  ciType* elem_type;
+  int array_base = 0;
+  int element_size = 0;
+  BasicType basic_elem_type = T_ILLEGAL;
+  ciType* elem_type = NULL;
 
   Node* res = alloc->result_cast();
   assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result");
@@ -1305,10 +1305,10 @@
   // We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
   // they will not be used if "always_slow" is set
   enum { slow_result_path = 1, fast_result_path = 2 };
-  Node *result_region;
-  Node *result_phi_rawmem;
-  Node *result_phi_rawoop;
-  Node *result_phi_i_o;
+  Node *result_region = NULL;
+  Node *result_phi_rawmem = NULL;
+  Node *result_phi_rawoop = NULL;
+  Node *result_phi_i_o = NULL;
 
   // The initial slow comparison is a size check, the comparison
   // we want to do is a BoolTest::gt
--- a/hotspot/src/share/vm/opto/memnode.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -997,7 +997,7 @@
 // "Acquire" - no following ref can move before (but earlier refs can
 // follow, like an early Load stalled in cache).  Requires multi-cpu
 // visibility.  Inserted independ of any load, as required
-// for intrinsic sun.misc.Unsafe.loadFence().
+// for intrinsic Unsafe.loadFence().
 class LoadFenceNode: public MemBarNode {
 public:
   LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
@@ -1018,7 +1018,7 @@
 // "Release" - no earlier ref can move after (but later refs can move
 // up, like a speculative pipelined cache-hitting Load).  Requires
 // multi-cpu visibility.  Inserted independent of any store, as required
-// for intrinsic sun.misc.Unsafe.storeFence().
+// for intrinsic Unsafe.storeFence().
 class StoreFenceNode: public MemBarNode {
 public:
   StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
--- a/hotspot/src/share/vm/opto/parse1.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/parse1.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -108,7 +108,7 @@
 
   // Very similar to LoadNode::make, except we handle un-aligned longs and
   // doubles on Sparc.  Intel can handle them just fine directly.
-  Node *l;
+  Node *l = NULL;
   switch (bt) {                // Signature is flattened
   case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
   case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
@@ -1903,7 +1903,7 @@
   // Now use a Phi here for merging
   assert(!nocreate, "Cannot build a phi for a block already parsed.");
   const JVMState* jvms = map->jvms();
-  const Type* t;
+  const Type* t = NULL;
   if (jvms->is_loc(idx)) {
     t = block()->local_type_at(idx - jvms->locoff());
   } else if (jvms->is_stk(idx)) {
--- a/hotspot/src/share/vm/opto/runtime.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -260,7 +260,7 @@
   // Scavenge and allocate an instance.
   oop result;
 
-  if (array_type->oop_is_typeArray()) {
+  if (array_type->is_typeArray_klass()) {
     // The oopFactory likes to work with the element type.
     // (We could bypass the oopFactory, since it doesn't add much value.)
     BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
@@ -298,7 +298,7 @@
   // Scavenge and allocate an instance.
   oop result;
 
-  assert(array_type->oop_is_typeArray(), "should be called only for type array");
+  assert(array_type->is_typeArray_klass(), "should be called only for type array");
   // The oopFactory likes to work with the element type.
   BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
   result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);
--- a/hotspot/src/share/vm/opto/split_if.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/split_if.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -451,8 +451,8 @@
 
   // Replace both uses of 'new_iff' with Regions merging True/False
   // paths.  This makes 'new_iff' go dead.
-  Node *old_false, *old_true;
-  Node *new_false, *new_true;
+  Node *old_false = NULL, *old_true = NULL;
+  Node *new_false = NULL, *new_true = NULL;
   for (DUIterator_Last j2min, j2 = iff->last_outs(j2min); j2 >= j2min; --j2) {
     Node *ifp = iff->last_out(j2);
     assert( ifp->Opcode() == Op_IfFalse || ifp->Opcode() == Op_IfTrue, "" );
--- a/hotspot/src/share/vm/opto/superword.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/opto/superword.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -3665,7 +3665,7 @@
 }
 
 int SuperWord::mark_generations() {
-  Node *ii_err = 0, *tail_err;
+  Node *ii_err = NULL, *tail_err = NULL;
   for (int i = 0; i < _mem_slice_head.length(); i++) {
     Node* phi  = _mem_slice_head.at(i);
     assert(phi->is_Phi(), "must be phi");
--- a/hotspot/src/share/vm/prims/jni.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/jni.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -563,7 +563,7 @@
   // return mirror for superclass
   Klass* super = k->java_super();
   // super2 is the value computed by the compiler's getSuperClass intrinsic:
-  debug_only(Klass* super2 = ( k->oop_is_array()
+  debug_only(Klass* super2 = ( k->is_array_klass()
                                  ? SystemDictionary::Object_klass()
                                  : k->super() ) );
   assert(super == super2,
@@ -1344,14 +1344,14 @@
   if (name == vmSymbols::object_initializer_name() ||
       name == vmSymbols::class_initializer_name()) {
     // Never search superclasses for constructors
-    if (klass->oop_is_instance()) {
+    if (klass->is_instance_klass()) {
       m = InstanceKlass::cast(klass())->find_method(name, signature);
     } else {
       m = NULL;
     }
   } else {
     m = klass->lookup_method(name, signature);
-    if (m == NULL &&  klass->oop_is_instance()) {
+    if (m == NULL &&  klass->is_instance_klass()) {
       m = InstanceKlass::cast(klass())->lookup_method_in_ordered_interfaces(name, signature);
     }
   }
@@ -2038,7 +2038,7 @@
   k()->initialize(CHECK_NULL);
 
   fieldDescriptor fd;
-  if (!k()->oop_is_instance() ||
+  if (!k()->is_instance_klass() ||
       !InstanceKlass::cast(k())->find_field(fieldname, signame, false, &fd)) {
     THROW_MSG_0(vmSymbols::java_lang_NoSuchFieldError(), (char*) name);
   }
@@ -2292,7 +2292,7 @@
   k()->initialize(CHECK_NULL);
 
   fieldDescriptor fd;
-  if (!k()->oop_is_instance() ||
+  if (!k()->is_instance_klass() ||
       !InstanceKlass::cast(k())->find_field(fieldname, signame, true, &fd)) {
     THROW_MSG_0(vmSymbols::java_lang_NoSuchFieldError(), (char*) name);
   }
@@ -3045,7 +3045,7 @@
  HOTSPOT_JNI_UNREGISTERNATIVES_ENTRY(env, clazz);
   Klass* k   = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz));
   //%note jni_2
-  if (k->oop_is_instance()) {
+  if (k->is_instance_klass()) {
     for (int index = 0; index < InstanceKlass::cast(k)->methods()->length(); index++) {
       Method* m = InstanceKlass::cast(k)->methods()->at(index);
       if (m->is_native()) {
@@ -3852,6 +3852,7 @@
   unit_test_function_call
 
 // Forward declaration
+void TestNmethodBucket_test();
 void test_semaphore();
 void TestOS_test();
 void TestReservedSpace_test();
@@ -3875,11 +3876,13 @@
 void FreeRegionList_test();
 void test_memset_with_concurrent_readers();
 void TestPredictions_test();
+void WorkerDataArray_test();
 #endif
 
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
     tty->print_cr("Running internal VM tests");
+    run_unit_test(TestNmethodBucket_test());
     run_unit_test(test_semaphore());
     run_unit_test(TestOS_test());
     run_unit_test(TestReservedSpace_test());
@@ -3918,6 +3921,7 @@
     }
     run_unit_test(test_memset_with_concurrent_readers());
     run_unit_test(TestPredictions_test());
+    run_unit_test(WorkerDataArray_test());
 #endif
     tty->print_cr("All internal VM tests passed");
   }
@@ -4059,6 +4063,10 @@
     OrderAccess::release_store(&vm_created, 0);
   }
 
+  // Flush stdout and stderr before exit.
+  fflush(stdout);
+  fflush(stderr);
+
   return result;
 
 }
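
Editor's note: the fflush calls added at the end of the jni.cpp hunk make sure buffered diagnostics reach the terminal even if the process is torn down abruptly after the VM is destroyed. A tiny sketch of that teardown habit, with an invented finish() helper:

    #include <cstdio>

    // Flush the C standard streams before handing the result back, so anything
    // still sitting in stdio buffers is visible even on an abrupt exit.
    static int finish(int result) {
      std::fflush(stdout);
      std::fflush(stderr);
      return result;
    }

    int main() {
      std::printf("shutting down\n");
      return finish(0);
    }
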
--- a/hotspot/src/share/vm/prims/jniCheck.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/jniCheck.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -514,7 +514,7 @@
   ASSERT_OOPS_ALLOWED;
   assert(klass != NULL, "klass argument must have a value");
 
-  if (!klass->oop_is_instance() ||
+  if (!klass->is_instance_klass() ||
       !InstanceKlass::cast(klass)->is_subclass_of(SystemDictionary::Throwable_klass())) {
     ReportJNIFatalError(thr, fatal_class_not_a_throwable_class);
   }
--- a/hotspot/src/share/vm/prims/jvm.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1022,10 +1022,10 @@
   KlassHandle klass(thread, java_lang_Class::as_Klass(mirror));
   // Figure size of result array
   int size;
-  if (klass->oop_is_instance()) {
+  if (klass->is_instance_klass()) {
     size = InstanceKlass::cast(klass())->local_interfaces()->length();
   } else {
-    assert(klass->oop_is_objArray() || klass->oop_is_typeArray(), "Illegal mirror klass");
+    assert(klass->is_objArray_klass() || klass->is_typeArray_klass(), "Illegal mirror klass");
     size = 2;
   }
 
@@ -1033,7 +1033,7 @@
   objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), size, CHECK_NULL);
   objArrayHandle result (THREAD, r);
   // Fill in result
-  if (klass->oop_is_instance()) {
+  if (klass->is_instance_klass()) {
     // Regular instance klass, fill in all local interfaces
     for (int index = 0; index < size; index++) {
       Klass* k = InstanceKlass::cast(klass())->local_interfaces()->at(index);
@@ -1056,7 +1056,7 @@
   }
   Klass* k = java_lang_Class::as_Klass(mirror);
   jboolean result = k->is_interface();
-  assert(!result || k->oop_is_instance(),
+  assert(!result || k->is_instance_klass(),
          "all interfaces are instance types");
   // The compiler intrinsic for isInterface tests the
   // Klass::_access_flags bits in the same way.
@@ -1097,7 +1097,7 @@
     // Signers are only set once, ClassLoader.java, and thus shouldn't
     // be called with an array.  Only the bootstrap loader creates arrays.
     Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       java_lang_Class::set_signers(k->java_mirror(), objArrayOop(JNIHandles::resolve(signers)));
     }
   }
@@ -1156,7 +1156,7 @@
 // Create an AccessControlContext with a protection domain with null codesource
 // and null permissions - which gives no permissions.
 oop create_dummy_access_control_context(TRAPS) {
-  InstanceKlass* pd_klass = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass());
+  InstanceKlass* pd_klass = SystemDictionary::ProtectionDomain_klass();
   Handle obj = pd_klass->allocate_instance_handle(CHECK_NULL);
   // Call constructor ProtectionDomain(null, null);
   JavaValue result(T_VOID);
@@ -1356,7 +1356,7 @@
 JVM_QUICK_ENTRY(jboolean, JVM_IsArrayClass(JNIEnv *env, jclass cls))
   JVMWrapper("JVM_IsArrayClass");
   Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
-  return (k != NULL) && k->oop_is_array() ? true : false;
+  return (k != NULL) && k->is_array_klass() ? true : false;
 JVM_END
 
 
@@ -1389,7 +1389,7 @@
   // of an InstanceKlass
 
   if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) ||
-      ! java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_instance()) {
+      ! java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->is_instance_klass()) {
     oop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
     return (jobjectArray)JNIHandles::make_local(env, result);
   }
@@ -1453,7 +1453,7 @@
 {
   // ofClass is a reference to a java_lang_Class object.
   if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) ||
-      ! java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_instance()) {
+      ! java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->is_instance_klass()) {
     return NULL;
   }
 
@@ -1471,7 +1471,7 @@
 {
   oop mirror = JNIHandles::resolve_non_null(cls);
   if (java_lang_Class::is_primitive(mirror) ||
-      !java_lang_Class::as_Klass(mirror)->oop_is_instance()) {
+      !java_lang_Class::as_Klass(mirror)->is_instance_klass()) {
     return NULL;
   }
   instanceKlassHandle k(THREAD, InstanceKlass::cast(java_lang_Class::as_Klass(mirror)));
@@ -1496,7 +1496,7 @@
   // Return null for arrays and primitives
   if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {
     Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls));
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       Symbol* sym = InstanceKlass::cast(k)->generic_signature();
       if (sym == NULL) return NULL;
       Handle str = java_lang_String::create_from_symbol(sym, CHECK_NULL);
@@ -1514,7 +1514,7 @@
   // Return null for arrays and primitives
   if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {
     Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls));
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       typeArrayOop a = Annotations::make_java_array(InstanceKlass::cast(k)->class_annotations(), CHECK_NULL);
       return (jbyteArray) JNIHandles::make_local(env, a);
     }
@@ -1584,7 +1584,7 @@
   // Return null for arrays and primitives
   if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {
     Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls));
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       AnnotationArray* type_annotations = InstanceKlass::cast(k)->class_type_annotations();
       if (type_annotations != NULL) {
         typeArrayOop a = Annotations::make_java_array(type_annotations, CHECK_NULL);
@@ -1693,7 +1693,7 @@
 
   // Exclude primitive types and array types
   if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) ||
-      java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) {
+      java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->is_array_klass()) {
     // Return empty array
     oop res = oopFactory::new_objArray(SystemDictionary::reflect_Field_klass(), 0, CHECK_NULL);
     return (jobjectArray) JNIHandles::make_local(env, res);
@@ -1767,7 +1767,7 @@
 
   // Exclude primitive types and array types
   if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
-      || java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) {
+      || java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->is_array_klass()) {
     // Return empty array
     oop res = oopFactory::new_objArray(klass, 0, CHECK_NULL);
     return (jobjectArray) JNIHandles::make_local(env, res);
@@ -1868,7 +1868,7 @@
   // Return null for primitives and arrays
   if (!java_lang_Class::is_primitive(JNIHandles::resolve_non_null(cls))) {
     Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       instanceKlassHandle k_h(THREAD, k);
       Handle jcp = sun_reflect_ConstantPool::create(CHECK_NULL);
       sun_reflect_ConstantPool::set_cp(jcp(), k_h->constants());
@@ -2136,8 +2136,8 @@
   if (java_lang_Class::is_primitive(r)) return false;
 
   Klass* k = java_lang_Class::as_Klass(r);
-  assert(k->oop_is_instance(), "must be an instance klass");
-  if (! k->oop_is_instance()) return false;
+  assert(k->is_instance_klass(), "must be an instance klass");
+  if (!k->is_instance_klass()) return false;
 
   ResourceMark rm(THREAD);
   const char* name = k->name()->as_C_string();
@@ -2182,12 +2182,12 @@
   k = JvmtiThreadState::class_to_verify_considering_redefinition(k, thread);
   // types will have length zero if this is not an InstanceKlass
   // (length is determined by call to JVM_GetClassCPEntriesCount)
-  if (k->oop_is_instance()) {
+  if (k->is_instance_klass()) {
     ConstantPool* cp = InstanceKlass::cast(k)->constants();
     for (int index = cp->length() - 1; index >= 0; index--) {
       constantTag tag = cp->tag_at(index);
       types[index] = (tag.is_unresolved_klass()) ? JVM_CONSTANT_Class : tag.value();
-  }
+    }
   }
 JVM_END
 
@@ -2196,9 +2196,7 @@
   JVMWrapper("JVM_GetClassCPEntriesCount");
   Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
   k = JvmtiThreadState::class_to_verify_considering_redefinition(k, thread);
-  if (!k->oop_is_instance())
-    return 0;
-  return InstanceKlass::cast(k)->constants()->length();
+  return (!k->is_instance_klass()) ? 0 : InstanceKlass::cast(k)->constants()->length();
 JVM_END
 
 
@@ -2206,9 +2204,7 @@
   JVMWrapper("JVM_GetClassFieldsCount");
   Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
   k = JvmtiThreadState::class_to_verify_considering_redefinition(k, thread);
-  if (!k->oop_is_instance())
-    return 0;
-  return InstanceKlass::cast(k)->java_fields_count();
+  return (!k->is_instance_klass()) ? 0 : InstanceKlass::cast(k)->java_fields_count();
 JVM_END
 
 
@@ -2216,9 +2212,7 @@
   JVMWrapper("JVM_GetClassMethodsCount");
   Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
   k = JvmtiThreadState::class_to_verify_considering_redefinition(k, thread);
-  if (!k->oop_is_instance())
-    return 0;
-  return InstanceKlass::cast(k)->methods()->length();
+  return (!k->is_instance_klass()) ? 0 : InstanceKlass::cast(k)->methods()->length();
 JVM_END
 
 
@@ -3476,7 +3470,7 @@
 
   KlassHandle klass_handle(THREAD, klass);
   // Check if we should initialize the class
-  if (init && klass_handle->oop_is_instance()) {
+  if (init && klass_handle->is_instance_klass()) {
     klass_handle->initialize(CHECK_NULL);
   }
   return (jclass) JNIHandles::make_local(env, klass_handle->java_mirror());
@@ -3624,7 +3618,7 @@
     return NULL;
   }
   Klass* k = java_lang_Class::as_Klass(mirror());
-  if (!k->oop_is_instance()) {
+  if (!k->is_instance_klass()) {
     return NULL;
   }
   instanceKlassHandle ik_h(THREAD, k);
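
Most of the jvm.cpp hunks above are one mechanical rename: the Klass type queries oop_is_instance(), oop_is_array(), oop_is_objArray() and oop_is_typeArray() become is_instance_klass(), is_array_klass(), is_objArray_klass() and is_typeArray_klass(), with the guard-then-cast idiom around them unchanged (the remaining hunks collapse the three count accessors into ternaries, fix a misplaced brace, and drop a now-redundant InstanceKlass::cast of SystemDictionary::ProtectionDomain_klass()). A minimal sketch of the renamed guard-then-cast idiom, using only calls that appear in the hunks above; count_methods is a hypothetical helper, not VM code, and assumes the in-tree instanceKlass.hpp include:

    #include "oops/instanceKlass.hpp"

    // Hypothetical helper, for illustration only: array and primitive mirrors
    // are not InstanceKlasses, so they report zero declared methods.
    static int count_methods(Klass* k) {
      if (k == NULL || !k->is_instance_klass()) {
        return 0;
      }
      return InstanceKlass::cast(k)->methods()->length();
    }
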
--- a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,7 @@
 // JVMSpec|     u2 attributes_count;
 // JVMSpec|     attribute_info attributes[attributes_count];
 // JVMSpec|   }
-void JvmtiClassFileReconstituter::write_code_attribute(methodHandle method) {
+void JvmtiClassFileReconstituter::write_code_attribute(const methodHandle& method) {
   ConstMethod* const_method = method->constMethod();
   u2 line_num_cnt = 0;
   int stackmap_len = 0;
@@ -415,7 +415,7 @@
 }
 
 // Compute size of LineNumberTable
-u2 JvmtiClassFileReconstituter::line_number_table_entries(methodHandle method) {
+u2 JvmtiClassFileReconstituter::line_number_table_entries(const methodHandle& method) {
   // The line number table is compressed so we don't know how big it is until decompressed.
   // Decompression is really fast so we just do it twice.
   u2 num_entries = 0;
@@ -435,7 +435,7 @@
 // JVMSpec|        u2 line_number;
 // JVMSpec|     } line_number_table[line_number_table_length];
 // JVMSpec|   }
-void JvmtiClassFileReconstituter::write_line_number_table_attribute(methodHandle method,
+void JvmtiClassFileReconstituter::write_line_number_table_attribute(const methodHandle& method,
                                                                     u2 num_entries) {
 
   write_attribute_name_index("LineNumberTable");
@@ -461,7 +461,7 @@
 // JVMSpec|       u2 index;
 // JVMSpec|     } local_variable_table[local_variable_table_length];
 // JVMSpec|   }
-void JvmtiClassFileReconstituter::write_local_variable_table_attribute(methodHandle method, u2 num_entries) {
+void JvmtiClassFileReconstituter::write_local_variable_table_attribute(const methodHandle& method, u2 num_entries) {
     write_attribute_name_index("LocalVariableTable");
     write_u4(2 + num_entries * (2 + 2 + 2 + 2 + 2));
     write_u2(num_entries);
@@ -491,7 +491,7 @@
 // JVMSpec|       u2 index;
 // JVMSpec|     } local_variable_type_table[local_variable_type_table_length];
 // JVMSpec|   }
-void JvmtiClassFileReconstituter::write_local_variable_type_table_attribute(methodHandle method, u2 num_entries) {
+void JvmtiClassFileReconstituter::write_local_variable_type_table_attribute(const methodHandle& method, u2 num_entries) {
     write_attribute_name_index("LocalVariableTypeTable");
     write_u4(2 + num_entries * (2 + 2 + 2 + 2 + 2));
     write_u2(num_entries);
@@ -519,7 +519,7 @@
 // JSR-202|     u2 number_of_entries;
 // JSR-202|     stack_map_frame_entries[number_of_entries];
 // JSR-202|   }
-void JvmtiClassFileReconstituter::write_stackmap_table_attribute(methodHandle method,
+void JvmtiClassFileReconstituter::write_stackmap_table_attribute(const methodHandle& method,
                                                                  int stackmap_len) {
 
   write_attribute_name_index("StackMapTable");
@@ -538,7 +538,7 @@
 // JVMSpec|     u2 attributes_count;
 // JVMSpec|     attribute_info attributes[attributes_count];
 // JVMSpec|   }
-void JvmtiClassFileReconstituter::write_method_info(methodHandle method) {
+void JvmtiClassFileReconstituter::write_method_info(const methodHandle& method) {
   AccessFlags access_flags = method->access_flags();
   ConstMethod* const_method = method->constMethod();
   u2 generic_signature_index = const_method->generic_signature_index();
@@ -813,7 +813,7 @@
   Bytes::put_Java_u8(writeable_address(8), x);
 }
 
-void JvmtiClassFileReconstituter::copy_bytecodes(methodHandle mh,
+void JvmtiClassFileReconstituter::copy_bytecodes(const methodHandle& mh,
                                                  unsigned char* bytecodes) {
   // use a BytecodeStream to iterate over the bytecodes. JVM/fast bytecodes
   // and the breakpoint bytecode are converted to their original bytecodes.
--- a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -110,18 +110,18 @@
   void write_class_file_format();
   void write_field_infos();
   void write_method_infos();
-  void write_method_info(methodHandle method);
-  void write_code_attribute(methodHandle method);
+  void write_method_info(const methodHandle& method);
+  void write_code_attribute(const methodHandle& method);
   void write_exceptions_attribute(ConstMethod* const_method);
   void write_synthetic_attribute();
   void write_class_attributes();
   void write_source_file_attribute();
   void write_source_debug_extension_attribute();
-  u2 line_number_table_entries(methodHandle method);
-  void write_line_number_table_attribute(methodHandle method, u2 num_entries);
-  void write_local_variable_table_attribute(methodHandle method, u2 num_entries);
-  void write_local_variable_type_table_attribute(methodHandle method, u2 num_entries);
-  void write_stackmap_table_attribute(methodHandle method, int stackmap_table_len);
+  u2 line_number_table_entries(const methodHandle& method);
+  void write_line_number_table_attribute(const methodHandle& method, u2 num_entries);
+  void write_local_variable_table_attribute(const methodHandle& method, u2 num_entries);
+  void write_local_variable_type_table_attribute(const methodHandle& method, u2 num_entries);
+  void write_stackmap_table_attribute(const methodHandle& method, int stackmap_table_len);
   u2 inner_classes_attribute_length();
   void write_inner_classes_attribute(int length);
   void write_signature_attribute(u2 generic_signaure_index);
@@ -150,7 +150,7 @@
 
   u1* class_file_bytes()      { return _buffer; }
 
-  static void copy_bytecodes(methodHandle method, unsigned char* bytecodes);
+  static void copy_bytecodes(const methodHandle& method, unsigned char* bytecodes);
 };
 
 #endif // SHARE_VM_PRIMS_JVMTICLASSFILERECONSTITUTER_HPP
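
The signature changes in this reconstituter header and its .cpp, and the matching ones further down (jvmtiRedefineClasses, nativeLookup, compilationPolicy, advancedThresholdPolicy), all replace pass-by-value methodHandle/constantPoolHandle parameters with const references. The handle types are small wrapper objects, so passing them by value copies the wrapper at every call; a const reference avoids that copy and keeps the callee from rebinding the handle. A standalone sketch of the idea (Handle here is a stand-in struct, not the VM type):

    #include <cstdio>

    struct Handle {            // stand-in for the VM's handle wrapper
      void* obj;
    };

    // Before: the wrapper object is copied at every call site.
    static void use_by_value(Handle h)            { std::printf("%p\n", h.obj); }

    // After: no copy, and the parameter is read-only in the callee.
    static void use_by_const_ref(const Handle& h) { std::printf("%p\n", h.obj); }

    int main() {
      Handle h = { NULL };
      use_by_value(h);
      use_by_const_ref(h);
      return 0;
    }
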
--- a/hotspot/src/share/vm/prims/jvmtiEnter.xsl	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiEnter.xsl	Fri Oct 30 00:02:37 2015 +0100
@@ -629,8 +629,8 @@
   jint trace_flags = JvmtiTrace::trace_flags(</xsl:text>
       <xsl:value-of select="@num"/>
       <xsl:text>);
-  const char *func_name;
-  const char *curr_thread_name;
+  const char *func_name = NULL;
+  const char *curr_thread_name = NULL;
   if (trace_flags) {
     func_name = JvmtiTrace::function_name(</xsl:text>
       <xsl:value-of select="@num"/>
--- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -2139,7 +2139,7 @@
   }
   if (generic_ptr != NULL) {
     *generic_ptr = NULL;
-    if (!isPrimitive && k->oop_is_instance()) {
+    if (!isPrimitive && k->is_instance_klass()) {
       Symbol* soo = InstanceKlass::cast(k)->generic_signature();
       if (soo != NULL) {
         const char *gen_sig = soo->as_C_string();
@@ -2188,7 +2188,7 @@
   Klass* k_klass = java_lang_Class::as_Klass(k_mirror);
   NULL_CHECK(k_klass, JVMTI_ERROR_INVALID_CLASS);
 
-  if (!k_klass->oop_is_instance()) {
+  if (!k_klass->is_instance_klass()) {
     return JVMTI_ERROR_ABSENT_INFORMATION;
   }
 
@@ -2256,7 +2256,7 @@
     return JVMTI_ERROR_CLASS_NOT_PREPARED;
   }
 
-  if (!k->oop_is_instance()) {
+  if (!k->is_instance_klass()) {
     *method_count_ptr = 0;
     *methods_ptr = (jmethodID*) jvmtiMalloc(0 * sizeof(jmethodID));
     return JVMTI_ERROR_NONE;
@@ -2340,7 +2340,7 @@
     return JVMTI_ERROR_CLASS_NOT_PREPARED;
   }
 
-  if (!k->oop_is_instance()) {
+  if (!k->is_instance_klass()) {
     *field_count_ptr = 0;
     *fields_ptr = (jfieldID*) jvmtiMalloc(0 * sizeof(jfieldID));
     return JVMTI_ERROR_NONE;
@@ -2394,7 +2394,7 @@
     if (!(k->jvmti_class_status() & (JVMTI_CLASS_STATUS_PREPARED|JVMTI_CLASS_STATUS_ARRAY) ))
       return JVMTI_ERROR_CLASS_NOT_PREPARED;
 
-    if (!k->oop_is_instance()) {
+    if (!k->is_instance_klass()) {
       *interface_count_ptr = 0;
       *interfaces_ptr = (jclass*) jvmtiMalloc(0 * sizeof(jclass));
       return JVMTI_ERROR_NONE;
@@ -2528,7 +2528,7 @@
     bool result = false;
     if (!java_lang_Class::is_primitive(k_mirror)) {
       Klass* k = java_lang_Class::as_Klass(k_mirror);
-      if (k != NULL && k->oop_is_array()) {
+      if (k != NULL && k->is_array_klass()) {
         result = true;
       }
     }
@@ -2576,7 +2576,7 @@
     }
     Klass* k = java_lang_Class::as_Klass(k_mirror);
     NULL_CHECK(k, JVMTI_ERROR_INVALID_CLASS);
-    if (!k->oop_is_instance()) {
+    if (!k->is_instance_klass()) {
       return JVMTI_ERROR_ABSENT_INFORMATION;
     }
     char* sde = InstanceKlass::cast(k)->source_debug_extension();
@@ -3064,7 +3064,7 @@
     // in thread.cpp.
     JvmtiPendingMonitors::enter(rmonitor);
   } else {
-    int r;
+    int r = 0;
     Thread* thread = Thread::current();
 
     if (thread->is_Java_thread()) {
@@ -3127,7 +3127,7 @@
       err = JVMTI_ERROR_NOT_MONITOR_OWNER;
     }
   } else {
-    int r;
+    int r = 0;
     Thread* thread = Thread::current();
 
     if (thread->is_Java_thread()) {
@@ -3161,7 +3161,7 @@
 // rmonitor - pre-checked for validity
 jvmtiError
 JvmtiEnv::RawMonitorWait(JvmtiRawMonitor * rmonitor, jlong millis) {
-  int r;
+  int r = 0;
   Thread* thread = Thread::current();
 
   if (thread->is_Java_thread()) {
@@ -3220,7 +3220,7 @@
 // rmonitor - pre-checked for validity
 jvmtiError
 JvmtiEnv::RawMonitorNotify(JvmtiRawMonitor * rmonitor) {
-  int r;
+  int r = 0;
   Thread* thread = Thread::current();
 
   if (thread->is_Java_thread()) {
@@ -3251,7 +3251,7 @@
 // rmonitor - pre-checked for validity
 jvmtiError
 JvmtiEnv::RawMonitorNotifyAll(JvmtiRawMonitor * rmonitor) {
-  int r;
+  int r = 0;
   Thread* thread = Thread::current();
 
   if (thread->is_Java_thread()) {
--- a/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -512,7 +512,7 @@
 // mean much better out of memory handling
 unsigned char *
 JvmtiEnvBase::jvmtiMalloc(jlong size) {
-  unsigned char* mem;
+  unsigned char* mem = NULL;
   jvmtiError result = allocate(size, &mem);
   assert(result == JVMTI_ERROR_NONE, "Allocate failed");
   return mem;
@@ -1032,7 +1032,7 @@
     // implied else: entry_count == 0
   }
 
-  jint nWant, nWait;
+  jint nWant = 0, nWait = 0;
   if (mon != NULL) {
     // this object has a heavyweight monitor
     nWant = mon->contentions(); // # of threads contending for monitor
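
The jvmtiEnter.xsl, jvmtiEnv.cpp and jvmtiEnvBase.cpp hunks all add explicit initializers (func_name = NULL, r = 0, mem = NULL, nWant = 0, nWait = 0) to locals that are assigned only on some control paths. The compiler cannot always prove which path runs, so the initializers quiet may-be-used-uninitialized warnings, which matters once warnings are treated as errors. A small standalone illustration of the pattern (everything below is illustrative, not VM code):

    #include <cstdio>

    static bool maybe_set(int* out, bool ok) {
      if (ok) { *out = 42; return true; }
      return false;            // *out is left untouched on this path
    }

    int main() {
      int r = 0;               // initialized up front, as in the hunks above
      maybe_set(&r, false);    // may or may not write r
      std::printf("%d\n", r);  // always well-defined thanks to the initializer
      return 0;
    }
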
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -104,7 +104,7 @@
         ClassLoaderData* cld = _scratch_classes[i]->class_loader_data();
         // Free the memory for this class at class unloading time.  Not before
         // because CMS might think this is still live.
-        cld->add_to_deallocate_list((InstanceKlass*)_scratch_classes[i]);
+        cld->add_to_deallocate_list(InstanceKlass::cast(_scratch_classes[i]));
       }
     }
     // Free os::malloc allocated memory in load_new_class_version.
@@ -199,7 +199,7 @@
   }
   Klass* the_class_oop = java_lang_Class::as_Klass(klass_mirror);
   // classes for arrays cannot be redefined
-  if (the_class_oop == NULL || !the_class_oop->oop_is_instance()) {
+  if (the_class_oop == NULL || !the_class_oop->is_instance_klass()) {
     return false;
   }
   return true;
@@ -216,7 +216,7 @@
 // referenced CP entries may already exist in *merge_cp_p in which case
 // there is nothing extra to append and only the current entry is
 // appended.
-void VM_RedefineClasses::append_entry(constantPoolHandle scratch_cp,
+void VM_RedefineClasses::append_entry(const constantPoolHandle& scratch_cp,
        int scratch_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p,
        TRAPS) {
 
@@ -336,7 +336,7 @@
       int new_name_and_type_ref_i = find_or_append_indirect_entry(scratch_cp, name_and_type_ref_i,
                                                           merge_cp_p, merge_cp_length_p, THREAD);
 
-      const char *entry_name;
+      const char *entry_name = NULL;
       switch (scratch_cp->tag_at(scratch_i).value()) {
       case JVM_CONSTANT_Fieldref:
         entry_name = "Fieldref";
@@ -475,7 +475,7 @@
 } // end append_entry()
 
 
-int VM_RedefineClasses::find_or_append_indirect_entry(constantPoolHandle scratch_cp,
+int VM_RedefineClasses::find_or_append_indirect_entry(const constantPoolHandle& scratch_cp,
       int ref_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS) {
 
   int new_ref_i = ref_i;
@@ -507,7 +507,7 @@
 // Append a bootstrap specifier into the merge_cp operands that is semantically equal
 // to the scratch_cp operands bootstrap specifier passed by the old_bs_i index.
 // Recursively append new merge_cp entries referenced by the new bootstrap specifier.
-void VM_RedefineClasses::append_operand(constantPoolHandle scratch_cp, int old_bs_i,
+void VM_RedefineClasses::append_operand(const constantPoolHandle& scratch_cp, int old_bs_i,
        constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS) {
 
   int old_ref_i = scratch_cp->operand_bootstrap_method_ref_index_at(old_bs_i);
@@ -551,7 +551,7 @@
 } // end append_operand()
 
 
-int VM_RedefineClasses::find_or_append_operand(constantPoolHandle scratch_cp,
+int VM_RedefineClasses::find_or_append_operand(const constantPoolHandle& scratch_cp,
       int old_bs_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS) {
 
   int new_bs_i = old_bs_i; // bootstrap specifier index
@@ -577,7 +577,7 @@
 } // end find_or_append_operand()
 
 
-void VM_RedefineClasses::finalize_operands_merge(constantPoolHandle merge_cp, TRAPS) {
+void VM_RedefineClasses::finalize_operands_merge(const constantPoolHandle& merge_cp, TRAPS) {
   if (merge_cp->operands() == NULL) {
     return;
   }
@@ -910,8 +910,8 @@
 
 // Returns true if the current mismatch is due to a resolved/unresolved
 // class pair. Otherwise, returns false.
-bool VM_RedefineClasses::is_unresolved_class_mismatch(constantPoolHandle cp1,
-       int index1, constantPoolHandle cp2, int index2) {
+bool VM_RedefineClasses::is_unresolved_class_mismatch(const constantPoolHandle& cp1,
+       int index1, const constantPoolHandle& cp2, int index2) {
 
   jbyte t1 = cp1->tag_at(index1).value();
   if (t1 != JVM_CONSTANT_Class && t1 != JVM_CONSTANT_UnresolvedClass) {
@@ -1149,7 +1149,7 @@
 
 // Map old_index to new_index as needed. scratch_cp is only needed
 // for RC_TRACE() calls.
-void VM_RedefineClasses::map_index(constantPoolHandle scratch_cp,
+void VM_RedefineClasses::map_index(const constantPoolHandle& scratch_cp,
        int old_index, int new_index) {
   if (find_new_index(old_index) != 0) {
     // old_index is already mapped
@@ -1195,8 +1195,8 @@
 // scratch_cp to the corresponding entry in *merge_cp_p. Index map
 // entries are only created for entries in scratch_cp that occupy a
 // different location in *merged_cp_p.
-bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp,
-       constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p,
+bool VM_RedefineClasses::merge_constant_pools(const constantPoolHandle& old_cp,
+       const constantPoolHandle& scratch_cp, constantPoolHandle *merge_cp_p,
        int *merge_cp_length_p, TRAPS) {
 
   if (merge_cp_p == NULL) {
@@ -1892,7 +1892,7 @@
   }
 
   u2 type_index = rewrite_cp_ref_in_annotation_data(annotations_typeArray,
-                    byte_i_ref, "mapped old type_index=%d", THREAD);
+                    byte_i_ref, "type_index", THREAD);
 
   u2 num_element_value_pairs = Bytes::get_Java_u2((address)
                                  annotations_typeArray->adr_at(byte_i_ref));
@@ -1915,7 +1915,7 @@
 
     u2 element_name_index = rewrite_cp_ref_in_annotation_data(
                               annotations_typeArray, byte_i_ref,
-                              "mapped old element_name_index=%d", THREAD);
+                              "element_name_index", THREAD);
 
     RC_TRACE_WITH_THREAD(0x02000000, THREAD,
       ("element_name_index=%d", element_name_index));
@@ -1939,8 +1939,6 @@
 // annotations_typeArray if needed. Returns the original constant
 // pool reference if a rewrite was not needed or the new constant
 // pool reference if a rewrite was needed.
-PRAGMA_DIAG_PUSH
-PRAGMA_FORMAT_NONLITERAL_IGNORED
 u2 VM_RedefineClasses::rewrite_cp_ref_in_annotation_data(
      AnnotationArray* annotations_typeArray, int &byte_i_ref,
      const char * trace_mesg, TRAPS) {
@@ -1950,14 +1948,13 @@
   u2 old_cp_index = Bytes::get_Java_u2(cp_index_addr);
   u2 new_cp_index = find_new_index(old_cp_index);
   if (new_cp_index != 0) {
-    RC_TRACE_WITH_THREAD(0x02000000, THREAD, (trace_mesg, old_cp_index));
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("mapped old %s=%d", trace_mesg, old_cp_index));
     Bytes::put_Java_u2(cp_index_addr, new_cp_index);
     old_cp_index = new_cp_index;
   }
   byte_i_ref += 2;
   return old_cp_index;
 }
-PRAGMA_DIAG_POP
 
 
 // Rewrite constant pool references in the element_value portion of an
@@ -2022,7 +2019,7 @@
 
       u2 const_value_index = rewrite_cp_ref_in_annotation_data(
                                annotations_typeArray, byte_i_ref,
-                               "mapped old const_value_index=%d", THREAD);
+                               "const_value_index", THREAD);
 
       RC_TRACE_WITH_THREAD(0x02000000, THREAD,
         ("const_value_index=%d", const_value_index));
@@ -2041,11 +2038,11 @@
 
       u2 type_name_index = rewrite_cp_ref_in_annotation_data(
                              annotations_typeArray, byte_i_ref,
-                             "mapped old type_name_index=%d", THREAD);
+                             "type_name_index", THREAD);
 
       u2 const_name_index = rewrite_cp_ref_in_annotation_data(
                               annotations_typeArray, byte_i_ref,
-                              "mapped old const_name_index=%d", THREAD);
+                              "const_name_index", THREAD);
 
       RC_TRACE_WITH_THREAD(0x02000000, THREAD,
         ("type_name_index=%d  const_name_index=%d", type_name_index,
@@ -2065,7 +2062,7 @@
 
       u2 class_info_index = rewrite_cp_ref_in_annotation_data(
                               annotations_typeArray, byte_i_ref,
-                              "mapped old class_info_index=%d", THREAD);
+                              "class_info_index", THREAD);
 
       RC_TRACE_WITH_THREAD(0x02000000, THREAD,
         ("class_info_index=%d", class_info_index));
@@ -2867,7 +2864,7 @@
 // }
 //
 void VM_RedefineClasses::rewrite_cp_refs_in_stack_map_table(
-       methodHandle method, TRAPS) {
+       const methodHandle& method, TRAPS) {
 
   if (!method->has_stackmap_table()) {
     return;
@@ -3339,10 +3336,10 @@
 
   // If the class being redefined is java.lang.Object, we need to fix all
   // array class vtables also
-  if (k->oop_is_array() && _the_class_oop == SystemDictionary::Object_klass()) {
+  if (k->is_array_klass() && _the_class_oop == SystemDictionary::Object_klass()) {
     k->vtable()->adjust_method_entries(the_class, &trace_name_printed);
 
-  } else if (k->oop_is_instance()) {
+  } else if (k->is_instance_klass()) {
     HandleMark hm(_thread);
     InstanceKlass *ik = InstanceKlass::cast(k);
 
@@ -3379,7 +3376,7 @@
     // default_vtable_indices for methods already in the vtable.
     // If redefining Unsafe, walk all the vtables looking for entries.
     if (ik->vtable_length() > 0 && (_the_class_oop->is_interface()
-        || _the_class_oop == SystemDictionary::misc_Unsafe_klass()
+        || _the_class_oop == SystemDictionary::internal_Unsafe_klass()
         || ik->is_subtype_of(_the_class_oop))) {
       // ik->vtable() creates a wrapper object; rm cleans it up
       ResourceMark rm(_thread);
@@ -3396,7 +3393,7 @@
     // subclass relationship between an interface and an InstanceKlass.
     // If redefining Unsafe, walk all the itables looking for entries.
     if (ik->itable_length() > 0 && (_the_class_oop->is_interface()
-        || _the_class_oop == SystemDictionary::misc_Unsafe_klass()
+        || _the_class_oop == SystemDictionary::internal_Unsafe_klass()
         || ik->is_subclass_of(_the_class_oop))) {
       // ik->itable() creates a wrapper object; rm cleans it up
       ResourceMark rm(_thread);
@@ -3443,7 +3440,7 @@
 
 // Clean method data for this class
 void VM_RedefineClasses::MethodDataCleaner::do_klass(Klass* k) {
-  if (k->oop_is_instance()) {
+  if (k->is_instance_klass()) {
     InstanceKlass *ik = InstanceKlass::cast(k);
     // Clean MethodData of this class's methods so they don't refer to
     // old methods that are no longer running.
@@ -4131,9 +4128,9 @@
 
   for (Klass *subk = ik->subklass(); subk != NULL;
        subk = subk->next_sibling()) {
-    if (subk->oop_is_instance()) {
+    if (subk->is_instance_klass()) {
       // Only update instanceKlasses
-      InstanceKlass *subik = (InstanceKlass*)subk;
+      InstanceKlass *subik = InstanceKlass::cast(subk);
       // recursively do subclasses of the current subclass
       increment_class_counter(subik, THREAD);
     }
@@ -4158,7 +4155,7 @@
     no_old_methods = false;
   }
 
-  if (k->oop_is_instance()) {
+  if (k->is_instance_klass()) {
     HandleMark hm(_thread);
     InstanceKlass *ik = InstanceKlass::cast(k);
 
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -420,23 +420,23 @@
   void increment_class_counter(InstanceKlass *ik, TRAPS);
 
   // Support for constant pool merging (these routines are in alpha order):
-  void append_entry(constantPoolHandle scratch_cp, int scratch_i,
+  void append_entry(const constantPoolHandle& scratch_cp, int scratch_i,
     constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
-  void append_operand(constantPoolHandle scratch_cp, int scratch_bootstrap_spec_index,
+  void append_operand(const constantPoolHandle& scratch_cp, int scratch_bootstrap_spec_index,
     constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
-  void finalize_operands_merge(constantPoolHandle merge_cp, TRAPS);
-  int find_or_append_indirect_entry(constantPoolHandle scratch_cp, int scratch_i,
+  void finalize_operands_merge(const constantPoolHandle& merge_cp, TRAPS);
+  int find_or_append_indirect_entry(const constantPoolHandle& scratch_cp, int scratch_i,
     constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
-  int find_or_append_operand(constantPoolHandle scratch_cp, int scratch_bootstrap_spec_index,
+  int find_or_append_operand(const constantPoolHandle& scratch_cp, int scratch_bootstrap_spec_index,
     constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
   int find_new_index(int old_index);
   int find_new_operand_index(int old_bootstrap_spec_index);
-  bool is_unresolved_class_mismatch(constantPoolHandle cp1, int index1,
-    constantPoolHandle cp2, int index2);
-  void map_index(constantPoolHandle scratch_cp, int old_index, int new_index);
+  bool is_unresolved_class_mismatch(const constantPoolHandle& cp1, int index1,
+    const constantPoolHandle& cp2, int index2);
+  void map_index(const constantPoolHandle& scratch_cp, int old_index, int new_index);
   void map_operand_index(int old_bootstrap_spec_index, int new_bootstrap_spec_index);
-  bool merge_constant_pools(constantPoolHandle old_cp,
-    constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p,
+  bool merge_constant_pools(const constantPoolHandle& old_cp,
+    const constantPoolHandle& scratch_cp, constantPoolHandle *merge_cp_p,
     int *merge_cp_length_p, TRAPS);
   jvmtiError merge_cp_and_rewrite(instanceKlassHandle the_class,
     instanceKlassHandle scratch_class, TRAPS);
@@ -480,7 +480,7 @@
     instanceKlassHandle scratch_class, TRAPS);
   bool rewrite_cp_refs_in_methods_type_annotations(
     instanceKlassHandle scratch_class, TRAPS);
-  void rewrite_cp_refs_in_stack_map_table(methodHandle method, TRAPS);
+  void rewrite_cp_refs_in_stack_map_table(const methodHandle& method, TRAPS);
   void rewrite_cp_refs_in_verification_type_info(
          address& stackmap_addr_ref, address stackmap_end, u2 frame_i,
          u1 frame_size, TRAPS);
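
Beyond the const-reference handle parameters, the other substantive change in jvmtiRedefineClasses.cpp is the trace call in rewrite_cp_ref_in_annotation_data: callers used to hand in a complete format string ("mapped old type_index=%d" and friends), which made the RC_TRACE argument a non-literal format and required the PRAGMA_FORMAT_NONLITERAL_IGNORED push/pop. They now pass only the field name and the function formats with the literal "mapped old %s=%d", so the pragmas can be dropped. A standalone sketch of the two shapes (trace() stands in for the RC_TRACE macro; the format attribute is GCC/Clang specific):

    #include <cstdarg>
    #include <cstdio>

    static void trace(const char* fmt, ...) __attribute__((format(printf, 1, 2)));
    static void trace(const char* fmt, ...) {
      va_list ap;
      va_start(ap, fmt);
      std::vprintf(fmt, ap);
      va_end(ap);
    }

    // Old shape (warns under -Wformat-nonliteral):
    //   trace(trace_mesg, old_cp_index);      // trace_mesg supplied by the caller
    // New shape: the format is a literal, only the name varies.
    static void report(const char* name, int old_cp_index) {
      trace("mapped old %s=%d\n", name, old_cp_index);
    }

    int main() {
      report("type_index", 7);
      return 0;
    }
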
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -588,7 +588,7 @@
     _obj_tag = (_entry == NULL) ? 0 : _entry->tag();
 
     // get the class and the class's tag value
-    assert(InstanceKlass::cast(SystemDictionary::Class_klass())->is_mirror_instance_klass(), "Is not?");
+    assert(SystemDictionary::Class_klass()->is_mirror_instance_klass(), "Is not?");
 
     _klass_tag = tag_for(tag_map, _o->klass()->java_mirror());
   }
@@ -1118,7 +1118,7 @@
   Klass* klass = java_lang_Class::as_Klass(obj);
 
   // ignore classes for object and type arrays
-  if (!klass->oop_is_instance()) {
+  if (!klass->is_instance_klass()) {
     return 0;
   }
 
@@ -2569,7 +2569,7 @@
       // SystemDictionary::always_strong_oops_do reports the application
       // class loader as a root. We want this root to be reported as
       // a root kind of "OTHER" rather than "SYSTEM_CLASS".
-      if (!o->is_instanceMirror()) {
+      if (!o->is_instance() || !InstanceKlass::cast(o->klass())->is_mirror_instance_klass()) {
         kind = JVMTI_HEAP_REFERENCE_OTHER;
       }
     }
@@ -2821,7 +2821,7 @@
   int i;
   Klass* klass = java_lang_Class::as_Klass(java_class);
 
-  if (klass->oop_is_instance()) {
+  if (klass->is_instance_klass()) {
     InstanceKlass* ik = InstanceKlass::cast(klass);
 
     // Ignore the class if it hasn't been initialized yet
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -148,7 +148,7 @@
     oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder()
     int slot  = java_lang_reflect_Field::slot(target_oop);  // fd.index()
     KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
-    if (!k.is_null() && k->oop_is_instance()) {
+    if (!k.is_null() && k->is_instance_klass()) {
       fieldDescriptor fd(InstanceKlass::cast(k()), slot);
       oop mname2 = init_field_MemberName(mname, fd);
       if (mname2 != NULL) {
@@ -164,7 +164,7 @@
     oop clazz  = java_lang_reflect_Method::clazz(target_oop);
     int slot   = java_lang_reflect_Method::slot(target_oop);
     KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
-    if (!k.is_null() && k->oop_is_instance()) {
+    if (!k.is_null() && k->is_instance_klass()) {
       Method* m = InstanceKlass::cast(k())->method_with_idnum(slot);
       if (m == NULL || is_signature_polymorphic(m->intrinsic_id()))
         return NULL;            // do not resolve unless there is a concrete signature
@@ -175,7 +175,7 @@
     oop clazz  = java_lang_reflect_Constructor::clazz(target_oop);
     int slot   = java_lang_reflect_Constructor::slot(target_oop);
     KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
-    if (!k.is_null() && k->oop_is_instance()) {
+    if (!k.is_null() && k->is_instance_klass()) {
       Method* m = InstanceKlass::cast(k())->method_with_idnum(slot);
       if (m == NULL)  return NULL;
       CallInfo info(m, k());
@@ -637,8 +637,8 @@
   {
     Klass* defc_klass = java_lang_Class::as_Klass(defc_oop());
     if (defc_klass == NULL)  return empty;  // a primitive; no resolution possible
-    if (!defc_klass->oop_is_instance()) {
-      if (!defc_klass->oop_is_array())  return empty;
+    if (!defc_klass->is_instance_klass()) {
+      if (!defc_klass->is_array_klass())  return empty;
       defc_klass = SystemDictionary::Object_klass();
     }
     defc = instanceKlassHandle(THREAD, defc_klass);
@@ -804,7 +804,7 @@
   case IS_FIELD:
     {
       assert(vmtarget->is_klass(), "field vmtarget is Klass*");
-      if (!((Klass*) vmtarget)->oop_is_instance())  break;
+      if (!((Klass*) vmtarget)->is_instance_klass())  break;
       instanceKlassHandle defc(THREAD, (Klass*) vmtarget);
       DEBUG_ONLY(vmtarget = NULL);  // safety
       bool is_static = ((flags & JVM_ACC_STATIC) != 0);
@@ -841,7 +841,7 @@
 
   Thread* thread = Thread::current();
 
-  if (k.is_null() || !k->oop_is_instance())  return -1;
+  if (k.is_null() || !k->is_instance_klass())  return -1;
 
   int rfill = 0, rlimit = results->length(), rskip = skip;
   // overflow measurement:
@@ -1164,12 +1164,12 @@
   if (VerifyMethodHandles && caller_jh != NULL &&
       java_lang_invoke_MemberName::clazz(mname()) != NULL) {
     Klass* reference_klass = java_lang_Class::as_Klass(java_lang_invoke_MemberName::clazz(mname()));
-    if (reference_klass != NULL && reference_klass->oop_is_objArray()) {
+    if (reference_klass != NULL && reference_klass->is_objArray_klass()) {
       reference_klass = ObjArrayKlass::cast(reference_klass)->bottom_klass();
     }
 
     // Reflection::verify_class_access can only handle instance classes.
-    if (reference_klass != NULL && reference_klass->oop_is_instance()) {
+    if (reference_klass != NULL && reference_klass->is_instance_klass()) {
       // Emulate LinkResolver::check_klass_accessability.
       Klass* caller = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(caller_jh));
       if (!Reflection::verify_class_access(caller,
--- a/hotspot/src/share/vm/prims/nativeLookup.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/nativeLookup.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -67,7 +67,7 @@
 }
 
 
-char* NativeLookup::pure_jni_name(methodHandle method) {
+char* NativeLookup::pure_jni_name(const methodHandle& method) {
   stringStream st;
   // Prefix
   st.print("Java_");
@@ -80,7 +80,7 @@
 }
 
 
-char* NativeLookup::critical_jni_name(methodHandle method) {
+char* NativeLookup::critical_jni_name(const methodHandle& method) {
   stringStream st;
   // Prefix
   st.print("JavaCritical_");
@@ -93,7 +93,7 @@
 }
 
 
-char* NativeLookup::long_jni_name(methodHandle method) {
+char* NativeLookup::long_jni_name(const methodHandle& method) {
   // Signature ignores the wrapping parentheses and the trailing return type
   stringStream st;
   Symbol* signature = method->signature();
@@ -121,6 +121,7 @@
 #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
 
 static JNINativeMethod lookup_special_native_methods[] = {
+  { CC"Java_jdk_internal_misc_Unsafe_registerNatives",             NULL, FN_PTR(JVM_RegisterUnsafeMethods)       },
   { CC"Java_sun_misc_Unsafe_registerNatives",                      NULL, FN_PTR(JVM_RegisterUnsafeMethods)       },
   { CC"Java_java_lang_invoke_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) },
   { CC"Java_sun_misc_Perf_registerNatives",                        NULL, FN_PTR(JVM_RegisterPerfMethods)         },
@@ -142,7 +143,7 @@
   return NULL;
 }
 
-address NativeLookup::lookup_style(methodHandle method, char* pure_name, const char* long_name, int args_size, bool os_style, bool& in_base_library, TRAPS) {
+address NativeLookup::lookup_style(const methodHandle& method, char* pure_name, const char* long_name, int args_size, bool os_style, bool& in_base_library, TRAPS) {
   address entry;
   // Compute complete JNI name for style
   stringStream st;
@@ -199,7 +200,7 @@
 }
 
 
-address NativeLookup::lookup_critical_style(methodHandle method, char* pure_name, const char* long_name, int args_size, bool os_style) {
+address NativeLookup::lookup_critical_style(const methodHandle& method, char* pure_name, const char* long_name, int args_size, bool os_style) {
   if (!method->has_native_function()) {
     return NULL;
   }
@@ -229,7 +230,7 @@
 
 // Check all the formats of native implementation name to see if there is one
 // for the specified method.
-address NativeLookup::lookup_entry(methodHandle method, bool& in_base_library, TRAPS) {
+address NativeLookup::lookup_entry(const methodHandle& method, bool& in_base_library, TRAPS) {
   address entry = NULL;
   in_base_library = false;
   // Compute pure name
@@ -264,7 +265,7 @@
 
 // Check all the formats of native implementation name to see if there is one
 // for the specified method.
-address NativeLookup::lookup_critical_entry(methodHandle method) {
+address NativeLookup::lookup_critical_entry(const methodHandle& method) {
   if (!CriticalJNINatives) return NULL;
 
   if (method->is_synchronized() ||
@@ -318,7 +319,7 @@
 // If any are found, remove them before attempting the lookup of the
 // native implementation again.
 // See SetNativeMethodPrefix in the JVM TI Spec for more details.
-address NativeLookup::lookup_entry_prefixed(methodHandle method, bool& in_base_library, TRAPS) {
+address NativeLookup::lookup_entry_prefixed(const methodHandle& method, bool& in_base_library, TRAPS) {
 #if INCLUDE_JVMTI
   ResourceMark rm(THREAD);
 
@@ -354,7 +355,7 @@
   return NULL;
 }
 
-address NativeLookup::lookup_base(methodHandle method, bool& in_base_library, TRAPS) {
+address NativeLookup::lookup_base(const methodHandle& method, bool& in_base_library, TRAPS) {
   address entry = NULL;
   ResourceMark rm(THREAD);
 
@@ -372,7 +373,7 @@
 }
 
 
-address NativeLookup::lookup(methodHandle method, bool& in_base_library, TRAPS) {
+address NativeLookup::lookup(const methodHandle& method, bool& in_base_library, TRAPS) {
   if (!method->has_native_function()) {
     address entry = lookup_base(method, in_base_library, CHECK_NULL);
     method->set_native_function(entry,
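
The new first row in lookup_special_native_methods maps the jdk.internal.misc.Unsafe registerNatives symbol to JVM_RegisterUnsafeMethods alongside the existing sun.misc.Unsafe row, so either package spelling of Unsafe resolves without a library lookup. The symbol itself is just the short JNI name that pure_jni_name builds: "Java_", the class name with '.' or '/' turned into '_', another '_', then the method name. A standalone sketch of that mangling for the simple case (escaping of '_', ';' and non-ASCII characters is deliberately omitted):

    #include <cstdio>
    #include <string>

    // Illustrative only: short JNI name for class/method names that need no escapes.
    static std::string short_jni_name(const std::string& klass, const std::string& method) {
      std::string s = "Java_";
      for (char c : klass) {
        s += (c == '.' || c == '/') ? '_' : c;
      }
      s += '_';
      s += method;
      return s;
    }

    int main() {
      // Prints Java_jdk_internal_misc_Unsafe_registerNatives, matching the new table entry.
      std::printf("%s\n", short_jni_name("jdk/internal/misc/Unsafe", "registerNatives").c_str());
      return 0;
    }
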
--- a/hotspot/src/share/vm/prims/nativeLookup.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/nativeLookup.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,20 +34,20 @@
 class NativeLookup : AllStatic {
  private:
   // JNI name computation
-  static char* pure_jni_name(methodHandle method);
-  static char* long_jni_name(methodHandle method);
-  static char* critical_jni_name(methodHandle method);
+  static char* pure_jni_name(const methodHandle& method);
+  static char* long_jni_name(const methodHandle& method);
+  static char* critical_jni_name(const methodHandle& method);
 
   // Style specific lookup
-  static address lookup_style(methodHandle method, char* pure_name, const char* long_name, int args_size, bool os_style, bool& in_base_library, TRAPS);
-  static address lookup_critical_style(methodHandle method, char* pure_name, const char* long_name, int args_size, bool os_style);
-  static address lookup_base (methodHandle method, bool& in_base_library, TRAPS);
-  static address lookup_entry(methodHandle method, bool& in_base_library, TRAPS);
-  static address lookup_entry_prefixed(methodHandle method, bool& in_base_library, TRAPS);
+  static address lookup_style(const methodHandle& method, char* pure_name, const char* long_name, int args_size, bool os_style, bool& in_base_library, TRAPS);
+  static address lookup_critical_style(const methodHandle& method, char* pure_name, const char* long_name, int args_size, bool os_style);
+  static address lookup_base (const methodHandle& method, bool& in_base_library, TRAPS);
+  static address lookup_entry(const methodHandle& method, bool& in_base_library, TRAPS);
+  static address lookup_entry_prefixed(const methodHandle& method, bool& in_base_library, TRAPS);
  public:
   // Lookup native function. May throw UnsatisfiedLinkError.
-  static address lookup(methodHandle method, bool& in_base_library, TRAPS);
-  static address lookup_critical_entry(methodHandle method);
+  static address lookup(const methodHandle& method, bool& in_base_library, TRAPS);
+  static address lookup_critical_entry(const methodHandle& method);
 
   // Lookup native functions in base library.
   static address base_library_lookup(const char* class_name, const char* method_name, const char* signature);
--- a/hotspot/src/share/vm/prims/unsafe.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/prims/unsafe.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -45,7 +45,7 @@
 #endif // INCLUDE_ALL_GCS
 
 /*
- *      Implementation of class sun.misc.Unsafe
+ *      Implementation of class Unsafe
  */
 
 
@@ -766,12 +766,12 @@
   }
   oop      mirror = JNIHandles::resolve_non_null(acls);
   Klass* k      = java_lang_Class::as_Klass(mirror);
-  if (k == NULL || !k->oop_is_array()) {
+  if (k == NULL || !k->is_array_klass()) {
     THROW(vmSymbols::java_lang_InvalidClassException());
-  } else if (k->oop_is_objArray()) {
+  } else if (k->is_objArray_klass()) {
     base  = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
     scale = heapOopSize;
-  } else if (k->oop_is_typeArray()) {
+  } else if (k->is_typeArray_klass()) {
     TypeArrayKlass* tak = TypeArrayKlass::cast(k);
     base  = tak->array_header_in_bytes();
     assert(base == arrayOopDesc::base_offset_in_bytes(tak->element_type()), "array_header_size semantics ok");
@@ -783,7 +783,7 @@
 
 UNSAFE_ENTRY(jint, Unsafe_ArrayBaseOffset(JNIEnv *env, jobject unsafe, jclass acls))
   UnsafeWrapper("Unsafe_ArrayBaseOffset");
-  int base, scale;
+  int base = 0, scale = 0;
   getBaseAndScale(base, scale, acls, CHECK_0);
   return field_offset_from_byte_offset(base);
 UNSAFE_END
@@ -791,7 +791,7 @@
 
 UNSAFE_ENTRY(jint, Unsafe_ArrayIndexScale(JNIEnv *env, jobject unsafe, jclass acls))
   UnsafeWrapper("Unsafe_ArrayIndexScale");
-  int base, scale;
+  int base = 0, scale = 0;
   getBaseAndScale(base, scale, acls, CHECK_0);
   // This VM packs both fields and array elements down to the byte.
   // But watch out:  If this changes, so that array references for
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -467,14 +467,14 @@
 }
 
 // Update the rate and submit compile
-void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
+void AdvancedThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
   int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
   update_rate(os::javaTimeMillis(), mh());
   CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
 }
 
 // Handle the invocation event.
-void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
+void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
                                                       CompLevel level, nmethod* nm, JavaThread* thread) {
   if (should_create_mdo(mh(), level)) {
     create_mdo(mh, thread);
@@ -489,7 +489,7 @@
 
 // Handle the back branch event. Notice that we can compile the method
 // with a regular entry from here.
-void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
+void AdvancedThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
                                                        int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
   if (should_create_mdo(mh(), level)) {
     create_mdo(mh, thread);
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -213,11 +213,11 @@
   jlong start_time() const     { return _start_time; }
 
   // Submit a given method for compilation (and update the rate).
-  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread);
+  virtual void submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread);
   // event() from SimpleThresholdPolicy would call these.
-  virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
+  virtual void method_invocation_event(const methodHandle& method, const methodHandle& inlinee,
                                        CompLevel level, nmethod* nm, JavaThread* thread);
-  virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
+  virtual void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee,
                                         int bci, CompLevel level, nmethod* nm, JavaThread* thread);
 public:
   AdvancedThresholdPolicy() : _start_time(0) { }
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1120,7 +1120,7 @@
                                  Flag::Flags origin) {
   JDK_Version since = JDK_Version();
 
-  if (parse_argument(arg, origin) || ignore_unrecognized) {
+  if (parse_argument(arg, origin)) {
     return true;
   }
 
@@ -1156,7 +1156,7 @@
   Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true, true);
   if (found_flag != NULL) {
     char locked_message_buf[BUFLEN];
-    found_flag->get_locked_message(locked_message_buf, BUFLEN);
+    Flag::MsgType msg_type = found_flag->get_locked_message(locked_message_buf, BUFLEN);
     if (strlen(locked_message_buf) == 0) {
       if (found_flag->is_bool() && !has_plus_minus) {
         jio_fprintf(defaultStream::error_stream(),
@@ -1169,9 +1169,19 @@
           "Improperly specified VM option '%s'\n", argname);
       }
     } else {
+#ifdef PRODUCT
+      bool mismatched = ((msg_type == Flag::NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD) ||
+                         (msg_type == Flag::DEVELOPER_FLAG_BUT_PRODUCT_BUILD));
+      if (ignore_unrecognized && mismatched) {
+        return true;
+      }
+#endif
       jio_fprintf(defaultStream::error_stream(), "%s", locked_message_buf);
     }
   } else {
+    if (ignore_unrecognized) {
+      return true;
+    }
     jio_fprintf(defaultStream::error_stream(),
                 "Unrecognized VM option '%s'\n", argname);
     Flag* fuzzy_matched = Flag::fuzzy_match((const char*)argname, arg_len, true);
@@ -2469,16 +2479,6 @@
     }
   }
 
-  // Note: only executed in non-PRODUCT mode
-  if (!UseAsyncConcMarkSweepGC &&
-      (ExplicitGCInvokesConcurrent ||
-       ExplicitGCInvokesConcurrentAndUnloadsClasses)) {
-    jio_fprintf(defaultStream::error_stream(),
-                "error: +ExplicitGCInvokesConcurrent[AndUnloadsClasses] conflicts"
-                " with -UseAsyncConcMarkSweepGC");
-    status = false;
-  }
-
   if (PrintNMTStatistics) {
 #if INCLUDE_NMT
     if (MemTracker::tracking_level() == NMT_off) {
@@ -3858,6 +3858,7 @@
 
     JavaVMOption option;
     option.optionString = opt_hd;
+    option.extraInfo = NULL;
 
     options->append(option);                // Fill in option
 
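
The arguments.cpp hunks also narrow what -XX:+IgnoreUnrecognizedVMOptions swallows: a failed parse no longer returns success just because the flag is set (the "|| ignore_unrecognized" on the parse result is gone). Only genuinely unknown option names are ignored, plus, in product builds, develop/notproduct flags that simply do not exist in that build; a known flag with a bad value still reports an error. A rough standalone sketch of the resulting decision order (the function and parameters below are illustrative, not the VM's code):

    #include <cstdio>

    // Illustrative only: mirrors the order of checks once parsing has failed.
    static bool accept_unparsed(bool flag_exists, bool build_mismatch,
                                bool ignore_unrecognized, bool product_build) {
      if (!flag_exists) {
        return ignore_unrecognized;   // truly unknown name: ignore if asked to
      }
      if (product_build && ignore_unrecognized && build_mismatch) {
        return true;                  // develop/notproduct flag in a product build
      }
      return false;                   // known flag, bad or locked value: still an error
    }

    int main() {
      std::printf("%d\n", accept_unparsed(false, false, true, true));  // 1: unknown name ignored
      std::printf("%d\n", accept_unparsed(true,  false, true, true));  // 0: still reported
      return 0;
    }
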
--- a/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -218,7 +218,7 @@
   return 0;
 }
 
-void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) {
+void NonTieredCompPolicy::reset_counter_for_invocation_event(const methodHandle& m) {
   // Make sure invocation and backedge counter doesn't overflow again right away
   // as would be the case for native methods.
 
@@ -232,7 +232,7 @@
   assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
 }
 
-void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) {
+void NonTieredCompPolicy::reset_counter_for_back_branch_event(const methodHandle& m) {
   // Delay next back-branch event but pump up invocation counter to trigger
   // whole method compilation.
   MethodCounters* mcs = m->method_counters();
@@ -283,7 +283,7 @@
                                         CounterHalfLifeTime);
   for (int i = 0; i < classes_per_tick; i++) {
     Klass* k = SystemDictionary::try_get_next_class();
-    if (k != NULL && k->oop_is_instance()) {
+    if (k != NULL && k->is_instance_klass()) {
       InstanceKlass::cast(k)->methods_do(do_method);
     }
   }
@@ -357,7 +357,7 @@
   return (current >= initial + target);
 }
 
-nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci,
+nmethod* NonTieredCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci,
                                     int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) {
   assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
   NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
@@ -416,22 +416,18 @@
 }
 
 #ifndef PRODUCT
-PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL
-void NonTieredCompPolicy::trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci) {
+void NonTieredCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) {
   if (TraceInvocationCounterOverflow) {
     MethodCounters* mcs = m->method_counters();
     assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
     InvocationCounter* ic = mcs->invocation_counter();
     InvocationCounter* bc = mcs->backedge_counter();
     ResourceMark rm;
-    const char* msg =
-      bci == InvocationEntryBci
-      ? "comp-policy cntr ovfl @ %d in entry of "
-      : "comp-policy cntr ovfl @ %d in loop of ";
-PRAGMA_DIAG_PUSH
-PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
-    tty->print(msg, bci);
-PRAGMA_DIAG_POP
+    if (bci == InvocationEntryBci) {
+      tty->print("comp-policy cntr ovfl @ %d in entry of ", bci);
+    } else {
+      tty->print("comp-policy cntr ovfl @ %d in loop of ", bci);
+    }
     m->print_value();
     tty->cr();
     ic->print();
@@ -448,7 +444,7 @@
   }
 }
 
-void NonTieredCompPolicy::trace_osr_request(methodHandle method, nmethod* osr, int bci) {
+void NonTieredCompPolicy::trace_osr_request(const methodHandle& method, nmethod* osr, int bci) {
   if (TraceOnStackReplacement) {
     ResourceMark rm;
     tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
@@ -460,7 +456,7 @@
 
 // SimpleCompPolicy - compile current method
 
-void SimpleCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) {
+void SimpleCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
   const int comp_level = CompLevel_highest_tier;
   const int hot_count = m->invocation_count();
   reset_counter_for_invocation_event(m);
@@ -474,7 +470,7 @@
   }
 }
 
-void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThread* thread) {
+void SimpleCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
   const int comp_level = CompLevel_highest_tier;
   const int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
@@ -491,7 +487,7 @@
 
 
 // Consider m for compilation
-void StackWalkCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) {
+void StackWalkCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
   const int comp_level = CompLevel_highest_tier;
   const int hot_count = m->invocation_count();
   reset_counter_for_invocation_event(m);
@@ -530,7 +526,7 @@
   }
 }
 
-void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThread* thread) {
+void StackWalkCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
   const int comp_level = CompLevel_highest_tier;
   const int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
@@ -663,7 +659,7 @@
 }
 
 
-const char* StackWalkCompPolicy::shouldInline(methodHandle m, float freq, int cnt) {
+const char* StackWalkCompPolicy::shouldInline(const methodHandle& m, float freq, int cnt) {
   // Allows targeted inlining
   // positive filter: should send be inlined?  returns NULL (--> yes)
   // or rejection msg
@@ -690,7 +686,7 @@
 }
 
 
-const char* StackWalkCompPolicy::shouldNotInline(methodHandle m) {
+const char* StackWalkCompPolicy::shouldNotInline(const methodHandle& m) {
   // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg
   if (m->is_abstract()) return (_msg = "abstract method");
   // note: we allow ik->is_abstract()
--- a/hotspot/src/share/vm/runtime/compilationPolicy.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,7 +66,7 @@
   virtual int compiler_count(CompLevel comp_level) = 0;
   // main notification entry, return a pointer to an nmethod if the OSR is required,
   // returns NULL otherwise.
-  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) = 0;
+  virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) = 0;
   // safepoint() is called at the end of the safepoint
   virtual void do_safepoint_work() = 0;
   // reprofile request
@@ -91,11 +91,11 @@
 class NonTieredCompPolicy : public CompilationPolicy {
   int _compiler_count;
 protected:
-  static void trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci);
-  static void trace_osr_request(methodHandle method, nmethod* osr, int bci);
+  static void trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci);
+  static void trace_osr_request(const methodHandle& method, nmethod* osr, int bci);
   static void trace_osr_completion(nmethod* osr_nm);
-  void reset_counter_for_invocation_event(methodHandle method);
-  void reset_counter_for_back_branch_event(methodHandle method);
+  void reset_counter_for_invocation_event(const methodHandle& method);
+  void reset_counter_for_back_branch_event(const methodHandle& method);
 public:
   NonTieredCompPolicy() : _compiler_count(0) { }
   virtual CompLevel initial_compile_level() { return CompLevel_highest_tier; }
@@ -107,15 +107,15 @@
   virtual bool is_mature(Method* method);
   virtual void initialize();
   virtual CompileTask* select_task(CompileQueue* compile_queue);
-  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread);
-  virtual void method_invocation_event(methodHandle m, JavaThread* thread) = 0;
-  virtual void method_back_branch_event(methodHandle m, int bci, JavaThread* thread) = 0;
+  virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread);
+  virtual void method_invocation_event(const methodHandle& m, JavaThread* thread) = 0;
+  virtual void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) = 0;
 };
 
 class SimpleCompPolicy : public NonTieredCompPolicy {
  public:
-  virtual void method_invocation_event(methodHandle m, JavaThread* thread);
-  virtual void method_back_branch_event(methodHandle m, int bci, JavaThread* thread);
+  virtual void method_invocation_event(const methodHandle& m, JavaThread* thread);
+  virtual void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread);
 };
 
 // StackWalkCompPolicy - existing C2 policy
@@ -123,8 +123,8 @@
 #ifdef COMPILER2
 class StackWalkCompPolicy : public NonTieredCompPolicy {
  public:
-  virtual void method_invocation_event(methodHandle m, JavaThread* thread);
-  virtual void method_back_branch_event(methodHandle m, int bci, JavaThread* thread);
+  virtual void method_invocation_event(const methodHandle& m, JavaThread* thread);
+  virtual void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread);
 
  private:
   RFrame* findTopInlinableFrame(GrowableArray<RFrame*>* stack);
@@ -134,9 +134,9 @@
   // they are used for performance debugging only (print better messages)
   static const char* _msg;            // reason for not inlining
 
-  static const char* shouldInline   (methodHandle callee, float frequency, int cnt);
+  static const char* shouldInline   (const methodHandle& callee, float frequency, int cnt);
   // positive filter: should send be inlined?  returns NULL (--> yes) or rejection msg
-  static const char* shouldNotInline(methodHandle callee);
+  static const char* shouldNotInline(const methodHandle& callee);
   // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg
 
 };
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -758,15 +758,15 @@
     KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
     oop obj = NULL;
 
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       InstanceKlass* ik = InstanceKlass::cast(k());
       obj = ik->allocate_instance(THREAD);
-    } else if (k->oop_is_typeArray()) {
+    } else if (k->is_typeArray_klass()) {
       TypeArrayKlass* ak = TypeArrayKlass::cast(k());
       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
       int len = sv->field_size() / type2size[ak->element_type()];
       obj = ak->allocate(len, THREAD);
-    } else if (k->oop_is_objArray()) {
+    } else if (k->is_objArray_klass()) {
       ObjArrayKlass* ak = ObjArrayKlass::cast(k());
       obj = ak->allocate(sv->field_size(), THREAD);
     }
@@ -1010,13 +1010,13 @@
       continue;
     }
 
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       InstanceKlass* ik = InstanceKlass::cast(k());
       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
-    } else if (k->oop_is_typeArray()) {
+    } else if (k->is_typeArray_klass()) {
       TypeArrayKlass* ak = TypeArrayKlass::cast(k());
       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
-    } else if (k->oop_is_objArray()) {
+    } else if (k->is_objArray_klass()) {
       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
     }
   }
@@ -1345,7 +1345,7 @@
 }
 
 #if defined(COMPILER2) || defined(SHARK) || INCLUDE_JVMCI
-void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
+void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) {
   // in case of an unresolved klass entry, load the class.
   if (constant_pool->tag_at(index).is_unresolved_klass()) {
     Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK);
@@ -1376,7 +1376,7 @@
 }
 
 
-void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index) {
+void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index) {
   EXCEPTION_MARK;
   load_class_by_index(constant_pool, index, THREAD);
   if (HAS_PENDING_EXCEPTION) {
--- a/hotspot/src/share/vm/runtime/deoptimization.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/deoptimization.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -420,8 +420,8 @@
                                                bool& ret_maybe_prior_trap,
                                                bool& ret_maybe_prior_recompile);
   // class loading support for uncommon trap
-  static void load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS);
-  static void load_class_by_index(constantPoolHandle constant_pool, int index);
+  static void load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS);
+  static void load_class_by_index(const constantPoolHandle& constant_pool, int index);
 
   static UnrollBlock* fetch_unroll_info_helper(JavaThread* thread);
 
--- a/hotspot/src/share/vm/runtime/frame.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/frame.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -442,6 +442,14 @@
   char* description;
   int owner;
   int priority;
+
+  FrameValue() {
+    location = NULL;
+    description = NULL;
+    owner = -1;
+    priority = 0;
+  }
+
 };
 
 
--- a/hotspot/src/share/vm/runtime/globals.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -306,35 +306,36 @@
   _flags = Flags(_flags & ~KIND_DIAGNOSTIC);
 }
 
-// Get custom message for this locked flag, or return NULL if
-// none is available.
-void Flag::get_locked_message(char* buf, int buflen) const {
+// Get custom message for this locked flag, or NULL if
+// none is available. Returns message type produced.
+Flag::MsgType Flag::get_locked_message(char* buf, int buflen) const {
   buf[0] = '\0';
   if (is_diagnostic() && !is_unlocked()) {
     jio_snprintf(buf, buflen,
                  "Error: VM option '%s' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.\n"
                  "Error: The unlock option must precede '%s'.\n",
                  _name, _name);
-    return;
+    return Flag::DIAGNOSTIC_FLAG_BUT_LOCKED;
   }
   if (is_experimental() && !is_unlocked()) {
     jio_snprintf(buf, buflen,
                  "Error: VM option '%s' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.\n"
                  "Error: The unlock option must precede '%s'.\n",
                  _name, _name);
-    return;
+    return Flag::EXPERIMENTAL_FLAG_BUT_LOCKED;
   }
   if (is_develop() && is_product_build()) {
     jio_snprintf(buf, buflen, "Error: VM option '%s' is develop and is available only in debug version of VM.\n",
                  _name);
-    return;
+    return Flag::DEVELOPER_FLAG_BUT_PRODUCT_BUILD;
   }
   if (is_notproduct() && is_product_build()) {
     jio_snprintf(buf, buflen, "Error: VM option '%s' is notproduct and is available only in debug version of VM.\n",
                  _name);
-    return;
+    return Flag::NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD;
   }
   get_locked_message_ext(buf, buflen);
+  return Flag::NONE;
 }
 
 bool Flag::is_writeable() const {
@@ -348,11 +349,6 @@
   return is_manageable() || is_external_ext();
 }
 
-
-// Length of format string (e.g. "%.1234s") for printing ccstr below
-#define FORMAT_BUFFER_LEN 16
-
-PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL
 void Flag::print_on(outputStream* st, bool withComments, bool printRanges) {
   // Don't print notproduct and develop flags in a product build.
   if (is_constant_in_binary()) {
@@ -384,14 +380,8 @@
       if (cp != NULL) {
         const char* eol;
         while ((eol = strchr(cp, '\n')) != NULL) {
-          char format_buffer[FORMAT_BUFFER_LEN];
           size_t llen = pointer_delta(eol, cp, sizeof(char));
-          jio_snprintf(format_buffer, FORMAT_BUFFER_LEN,
-                       "%%." SIZE_FORMAT "s", llen);
-          PRAGMA_DIAG_PUSH
-          PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
-          st->print(format_buffer, cp);
-          PRAGMA_DIAG_POP
+          st->print("%.*s", (int)llen, cp);
           st->cr();
           cp = eol+1;
           st->print("%5s %-35s += ", "", _name);
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -282,6 +282,14 @@
     ERR_OTHER
   };
 
+  enum MsgType {
+    NONE = 0,
+    DIAGNOSTIC_FLAG_BUT_LOCKED,
+    EXPERIMENTAL_FLAG_BUT_LOCKED,
+    DEVELOPER_FLAG_BUT_PRODUCT_BUILD,
+    NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD
+  };
+
   const char* _type;
   const char* _name;
   void* _addr;
@@ -367,7 +375,7 @@
 
   void unlock_diagnostic();
 
-  void get_locked_message(char*, int) const;
+  Flag::MsgType get_locked_message(char*, int) const;
   void get_locked_message_ext(char*, int) const;
 
   // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges
@@ -804,7 +812,7 @@
           "Inline Thread.currentThread, etc")                               \
                                                                             \
   develop(bool, InlineUnsafeOps, true,                                      \
-          "Inline memory ops (native methods) from sun.misc.Unsafe")        \
+          "Inline memory ops (native methods) from Unsafe")                 \
                                                                             \
   product(bool, CriticalJNINatives, true,                                   \
           "Check for critical JNI entry points")                            \
@@ -1622,12 +1630,6 @@
           "Number of times to retry allocations when "                      \
           "blocked by the GC locker")                                       \
                                                                             \
-  develop(bool, UseCMSAdaptiveFreeLists, true,                              \
-          "Use adaptive free lists in the CMS generation")                  \
-                                                                            \
-  develop(bool, UseAsyncConcMarkSweepGC, true,                              \
-          "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\
-                                                                            \
   product(bool, UseCMSBestFit, true,                                        \
           "Use CMS best fit allocation strategy")                           \
                                                                             \
@@ -1822,10 +1824,6 @@
           "When CMS class unloading is enabled, the maximum CMS cycle "     \
           "count for which classes may not be unloaded")                    \
                                                                             \
-  develop(intx, CMSDictionaryChoice, 0,                                     \
-          "Use BinaryTreeDictionary as default in the CMS generation")      \
-          range(0, 2)                                                       \
-                                                                            \
   product(uintx, CMSIndexedFreeListReplenish, 4,                            \
           "Replenish an indexed free list with this number of chunks")      \
           range(1, max_uintx)                                               \
@@ -1840,9 +1838,6 @@
   product(bool, CMSLoopWarn, false,                                         \
           "Warn in case of excessive CMS looping")                          \
                                                                             \
-  develop(bool, CMSOverflowEarlyRestoration, false,                         \
-          "Restore preserved marks early")                                  \
-                                                                            \
   /* where does the range max value of (max_jint - 1) come from? */         \
   product(size_t, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M),         \
           "Maximum size of marking stack")                                  \
@@ -2080,10 +2075,6 @@
           "unloading of classes when class unloading is enabled")           \
           range(0, 100)                                                     \
                                                                             \
-  develop(bool, CMSTestInFreeList, false,                                   \
-          "Check if the coalesced range is already in the "                 \
-          "free lists as claimed")                                          \
-                                                                            \
   notproduct(bool, CMSVerifyReturnedBytes, false,                           \
           "Check that all the garbage collected was returned to the "       \
           "free lists")                                                     \
@@ -4250,7 +4241,7 @@
           "Use locked-tracing when doing event-based tracing")              \
                                                                             \
   diagnostic(bool, UseUnalignedAccesses, false,                             \
-          "Use unaligned memory accesses in sun.misc.Unsafe")               \
+          "Use unaligned memory accesses in Unsafe")                        \
                                                                             \
   product_pd(bool, PreserveFramePointer,                                    \
              "Use the FP register for holding the frame pointer "           \
--- a/hotspot/src/share/vm/runtime/handles.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/handles.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -212,7 +212,8 @@
 }
 
 bool instanceKlassHandle::is_instanceKlass(const Klass* k) {
-  return k->oop_is_instance();
+  // Need this to avoid circular include dependency
+  return k->is_instance_klass();
 }
 
 #endif
--- a/hotspot/src/share/vm/runtime/java.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/java.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -575,6 +575,11 @@
 void vm_abort(bool dump_core) {
   vm_perform_shutdown_actions();
   os::wait_for_keypress_at_exit();
+
+  // Flush stdout and stderr before abort.
+  fflush(stdout);
+  fflush(stderr);
+
   os::abort(dump_core);
   ShouldNotReachHere();
 }
--- a/hotspot/src/share/vm/runtime/javaCalls.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/javaCalls.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -289,21 +289,20 @@
 // Implementation of JavaCalls (low level)
 
 
-void JavaCalls::call(JavaValue* result, methodHandle method, JavaCallArguments* args, TRAPS) {
+void JavaCalls::call(JavaValue* result, const methodHandle& method, JavaCallArguments* args, TRAPS) {
   // Check if we need to wrap a potential OS exception handler around thread
   // This is used for e.g. Win32 structured exception handlers
   assert(THREAD->is_Java_thread(), "only JavaThreads can make JavaCalls");
   // Need to wrap each and every time, since there might be native code down the
   // stack that has installed its own exception handlers
-  os::os_exception_wrapper(call_helper, result, &method, args, THREAD);
+  os::os_exception_wrapper(call_helper, result, method, args, THREAD);
 }
 
-void JavaCalls::call_helper(JavaValue* result, methodHandle* m, JavaCallArguments* args, TRAPS) {
+void JavaCalls::call_helper(JavaValue* result, const methodHandle& method, JavaCallArguments* args, TRAPS) {
   // During dumping, Java execution environment is not fully initialized. Also, Java execution
   // may cause undesirable side-effects in the class metadata.
   assert(!DumpSharedSpaces, "must not execute Java bytecodes when dumping");
 
-  methodHandle method = *m;
   JavaThread* thread = (JavaThread*)THREAD;
   assert(thread->is_Java_thread(), "must be called by a java thread");
   assert(method.not_null(), "must have a method to call");
@@ -546,7 +545,7 @@
 };
 
 
-void JavaCallArguments::verify(methodHandle method, BasicType return_type,
+void JavaCallArguments::verify(const methodHandle& method, BasicType return_type,
   Thread *thread) {
   guarantee(method->size_of_parameters() == size_of_parameters(), "wrong no. of arguments pushed");
 
--- a/hotspot/src/share/vm/runtime/javaCalls.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/javaCalls.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -189,7 +189,7 @@
   int   size_of_parameters() const { return _size; }
 
   // Verify that pushed arguments fits a given method
-  void verify(methodHandle method, BasicType return_type, Thread *thread);
+  void verify(const methodHandle& method, BasicType return_type, Thread *thread);
 };
 
 // All calls to Java have to go via JavaCalls. Sets up the stack frame
@@ -197,7 +197,7 @@
 //
 
 class JavaCalls: AllStatic {
-  static void call_helper(JavaValue* result, methodHandle* method, JavaCallArguments* args, TRAPS);
+  static void call_helper(JavaValue* result, const methodHandle& method, JavaCallArguments* args, TRAPS);
  public:
   // call_special
   // ------------
@@ -227,7 +227,7 @@
   static void call_static(JavaValue* result, KlassHandle klass, Symbol* name, Symbol* signature, Handle arg1, Handle arg2, TRAPS);
 
   // Low-level interface
-  static void call(JavaValue* result, methodHandle method, JavaCallArguments* args, TRAPS);
+  static void call(JavaValue* result, const methodHandle& method, JavaCallArguments* args, TRAPS);
 };
 
 #endif // SHARE_VM_RUNTIME_JAVACALLS_HPP
--- a/hotspot/src/share/vm/runtime/os.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/os.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -96,8 +96,7 @@
   // Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0"
   //                                      1         2
   //                             12345678901234567890123456789
-  static const char* iso8601_format =
-    "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d";
+  // format string: "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d"
   static const size_t needed_buffer = 29;
 
   // Sanity check the arguments
@@ -158,7 +157,8 @@
   // Print an ISO 8601 date and time stamp into the buffer
   const int year = 1900 + time_struct.tm_year;
   const int month = 1 + time_struct.tm_mon;
-  const int printed = jio_snprintf(buffer, buffer_length, iso8601_format,
+  const int printed = jio_snprintf(buffer, buffer_length,
+                                   "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d",
                                    year,
                                    month,
                                    time_struct.tm_mday,
@@ -1394,7 +1394,7 @@
 // Returns true if the current stack pointer is above the stack shadow
 // pages, false otherwise.
 
-bool os::stack_shadow_pages_available(Thread *thread, methodHandle method) {
+bool os::stack_shadow_pages_available(Thread *thread, const methodHandle& method) {
   assert(StackRedPages > 0 && StackYellowPages > 0,"Sanity check");
   address sp = current_stack_pointer();
   // Check if we have StackShadowPages above the yellow zone.  This parameter
--- a/hotspot/src/share/vm/runtime/os.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/os.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -96,7 +96,7 @@
 const bool ExecMem = true;
 
 // Typedef for structured exception handling support
-typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
+typedef void (*java_call_t)(JavaValue* value, const methodHandle& method, JavaCallArguments* args, Thread* thread);
 
 class MallocTracker;
 
@@ -263,7 +263,7 @@
   static bool uses_stack_guard_pages();
   static bool allocate_stack_guard_pages();
   static void bang_stack_shadow_pages();
-  static bool stack_shadow_pages_available(Thread *thread, methodHandle method);
+  static bool stack_shadow_pages_available(Thread *thread, const methodHandle& method);
 
   // OS interface to Virtual Memory
 
@@ -722,7 +722,7 @@
   static void init_random(long initval);   // initialize random sequence
 
   // Structured OS Exception support
-  static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
+  static void os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, JavaCallArguments* args, Thread* thread);
 
   // On Posix compatible OS it will simply check core dump limits while on Windows
   // it will check if dump file can be created. Check or prepare a core dump to be
--- a/hotspot/src/share/vm/runtime/park.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/park.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -121,22 +121,13 @@
 
     // Current association
     Thread * AssociatedWith ;
-    intptr_t RawThreadIdentity ;        // LWPID etc
-    volatile int Incarnation ;
-
-    // diagnostic : keep track of last thread to wake this thread.
-    // this is useful for construction of dependency graphs.
-    void * LastWaker ;
 
   public:
     // MCS-CLH list linkage and Native Mutex/Monitor
     ParkEvent * volatile ListNext ;
-    ParkEvent * volatile ListPrev ;
     volatile intptr_t OnList ;
     volatile int TState ;
     volatile int Notified ;             // for native monitor construct
-    volatile int IsWaiting ;            // Enqueued on WaitSet
-
 
   private:
     static ParkEvent * volatile FreeList ;
@@ -155,11 +146,9 @@
        AssociatedWith = NULL ;
        FreeNext       = NULL ;
        ListNext       = NULL ;
-       ListPrev       = NULL ;
        OnList         = 0 ;
        TState         = 0 ;
        Notified       = 0 ;
-       IsWaiting      = 0 ;
     }
 
     // We use placement-new to force ParkEvent instances to be
--- a/hotspot/src/share/vm/runtime/reflection.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/reflection.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -330,7 +330,7 @@
     return TypeArrayKlass::cast(tak)->allocate(length, THREAD);
   } else {
     Klass* k = java_lang_Class::as_Klass(element_mirror);
-    if (k->oop_is_array() && ArrayKlass::cast(k)->dimension() >= MAX_DIM) {
+    if (k->is_array_klass() && ArrayKlass::cast(k)->dimension() >= MAX_DIM) {
       THROW_0(vmSymbols::java_lang_IllegalArgumentException());
     }
     return oopFactory::new_objArray(k, length, THREAD);
@@ -366,7 +366,7 @@
     klass = basic_type_mirror_to_arrayklass(element_mirror, CHECK_NULL);
   } else {
     klass = java_lang_Class::as_Klass(element_mirror);
-    if (klass->oop_is_array()) {
+    if (klass->is_array_klass()) {
       int k_dim = ArrayKlass::cast(klass)->dimension();
       if (k_dim + len > MAX_DIM) {
         THROW_0(vmSymbols::java_lang_IllegalArgumentException());
@@ -387,7 +387,7 @@
   }
 
   Klass* klass = java_lang_Class::as_Klass(mirror);
-  if (!klass->oop_is_array()) {
+  if (!klass->is_array_klass()) {
     return NULL;
   }
 
@@ -395,14 +395,14 @@
 #ifdef ASSERT
   oop result2 = NULL;
   if (ArrayKlass::cast(klass)->dimension() == 1) {
-    if (klass->oop_is_typeArray()) {
+    if (klass->is_typeArray_klass()) {
       result2 = basic_type_arrayklass_to_mirror(klass, CHECK_NULL);
     } else {
       result2 = ObjArrayKlass::cast(klass)->element_klass()->java_mirror();
     }
   } else {
     Klass* lower_dim = ArrayKlass::cast(klass)->lower_dimension();
-    assert(lower_dim->oop_is_array(), "just checking");
+    assert(lower_dim->is_array_klass(), "just checking");
     result2 = lower_dim->java_mirror();
   }
   assert(result == result2, "results must be consistent");
@@ -495,7 +495,7 @@
   }
 
   Klass* host_class = current_class;
-  while (host_class->oop_is_instance() &&
+  while (host_class->is_instance_klass() &&
          InstanceKlass::cast(host_class)->is_anonymous()) {
     Klass* next_host_class = InstanceKlass::cast(host_class)->host_klass();
     if (next_host_class == NULL)  break;
@@ -612,7 +612,7 @@
 }
 
 
-objArrayHandle Reflection::get_parameter_types(methodHandle method, int parameter_count, oop* return_type, TRAPS) {
+objArrayHandle Reflection::get_parameter_types(const methodHandle& method, int parameter_count, oop* return_type, TRAPS) {
   // Allocate array holding parameter types (java.lang.Class instances)
   objArrayOop m = oopFactory::new_objArray(SystemDictionary::Class_klass(), parameter_count, CHECK_(objArrayHandle()));
   objArrayHandle mirrors (THREAD, m);
@@ -635,7 +635,7 @@
   return mirrors;
 }
 
-objArrayHandle Reflection::get_exception_types(methodHandle method, TRAPS) {
+objArrayHandle Reflection::get_exception_types(const methodHandle& method, TRAPS) {
   return method->resolved_checked_exceptions(THREAD);
 }
 
@@ -647,11 +647,9 @@
     return Handle(THREAD, Universe::java_mirror(type));
   }
 
-  oop loader = InstanceKlass::cast(k())->class_loader();
-  oop protection_domain = k()->protection_domain();
   Klass* result = SystemDictionary::resolve_or_fail(signature,
-                                    Handle(THREAD, loader),
-                                    Handle(THREAD, protection_domain),
+                                    Handle(THREAD, k->class_loader()),
+                                    Handle(THREAD, k->protection_domain()),
                                     true, CHECK_(Handle()));
 
   if (TraceClassResolution) {
@@ -663,7 +661,7 @@
 }
 
 
-oop Reflection::new_method(methodHandle method, bool for_constant_pool_access, TRAPS) {
+oop Reflection::new_method(const methodHandle& method, bool for_constant_pool_access, TRAPS) {
   // Allow sun.reflect.ConstantPool to refer to <clinit> methods as java.lang.reflect.Methods.
   assert(!method()->is_initializer() ||
          (for_constant_pool_access && method()->is_static()),
@@ -726,7 +724,7 @@
 }
 
 
-oop Reflection::new_constructor(methodHandle method, TRAPS) {
+oop Reflection::new_constructor(const methodHandle& method, TRAPS) {
   assert(method()->is_initializer(), "should call new_method instead");
 
   instanceKlassHandle  holder (THREAD, method->method_holder());
@@ -824,7 +822,7 @@
 }
 
 
-methodHandle Reflection::resolve_interface_call(instanceKlassHandle klass, methodHandle method,
+methodHandle Reflection::resolve_interface_call(instanceKlassHandle klass, const methodHandle& method,
                                                 KlassHandle recv_klass, Handle receiver, TRAPS) {
   assert(!method.is_null() , "method should not be null");
 
@@ -839,7 +837,7 @@
 }
 
 
-oop Reflection::invoke(instanceKlassHandle klass, methodHandle reflected_method,
+oop Reflection::invoke(instanceKlassHandle klass, const methodHandle& reflected_method,
                        Handle receiver, bool override, objArrayHandle ptypes,
                        BasicType rtype, objArrayHandle args, bool is_method_invoke, TRAPS) {
   ResourceMark rm(THREAD);
--- a/hotspot/src/share/vm/runtime/reflection.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/reflection.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,8 +48,8 @@
   static Klass* basic_type_mirror_to_arrayklass(oop basic_type_mirror, TRAPS);
   static oop      basic_type_arrayklass_to_mirror(Klass* basic_type_arrayklass, TRAPS);
 
-  static objArrayHandle get_parameter_types(methodHandle method, int parameter_count, oop* return_type, TRAPS);
-  static objArrayHandle get_exception_types(methodHandle method, TRAPS);
+  static objArrayHandle get_parameter_types(const methodHandle& method, int parameter_count, oop* return_type, TRAPS);
+  static objArrayHandle get_exception_types(const methodHandle& method, TRAPS);
   // Creating new java.lang.reflect.xxx wrappers
   static Handle new_type(Symbol* signature, KlassHandle k, TRAPS);
 
@@ -110,9 +110,9 @@
   //
 
   // Create a java.lang.reflect.Method object based on a method
-  static oop new_method(methodHandle method, bool for_constant_pool_access, TRAPS);
+  static oop new_method(const methodHandle& method, bool for_constant_pool_access, TRAPS);
   // Create a java.lang.reflect.Constructor object based on a method
-  static oop new_constructor(methodHandle method, TRAPS);
+  static oop new_constructor(const methodHandle& method, TRAPS);
   // Create a java.lang.reflect.Field object based on a field descriptor
   static oop new_field(fieldDescriptor* fd, TRAPS);
   // Create a java.lang.reflect.Parameter object based on a
@@ -122,9 +122,16 @@
 
 private:
   // method resolution for invoke
-  static methodHandle resolve_interface_call(instanceKlassHandle klass, methodHandle method, KlassHandle recv_klass, Handle receiver, TRAPS);
+  static methodHandle resolve_interface_call(instanceKlassHandle klass, const methodHandle& method, KlassHandle recv_klass, Handle receiver, TRAPS);
   // Method call (shared by invoke_method and invoke_constructor)
-  static oop  invoke(instanceKlassHandle klass, methodHandle method, Handle receiver, bool override, objArrayHandle ptypes, BasicType rtype, objArrayHandle args, bool is_method_invoke, TRAPS);
+  static oop  invoke(instanceKlassHandle klass,
+                     const methodHandle& method,
+                     Handle receiver,
+                     bool override,
+                     objArrayHandle ptypes,
+                     BasicType rtype,
+                     objArrayHandle args,
+                     bool is_method_invoke, TRAPS);
 
   // Narrowing of basic types. Used to create correct jvalues for
   // boolean, byte, char and short return return values from interpreter
--- a/hotspot/src/share/vm/runtime/relocator.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/relocator.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,7 @@
 //-----------------------------------------------------------------------------------------------------------
 // Relocator code
 
-Relocator::Relocator(methodHandle m, RelocatorListener* listener) {
+Relocator::Relocator(const methodHandle& m, RelocatorListener* listener) {
   set_method(m);
   set_code_length(method()->code_size());
   set_code_array(NULL);
--- a/hotspot/src/share/vm/runtime/relocator.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/relocator.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@
 
 class Relocator : public ResourceObj {
  public:
-  Relocator(methodHandle method, RelocatorListener* listener);
+  Relocator(const methodHandle& method, RelocatorListener* listener);
   methodHandle insert_space_at(int bci, int space, u_char inst_buffer[], TRAPS);
 
   // Callbacks from ChangeItem's
@@ -81,7 +81,7 @@
   void set_compressed_line_number_table_size(int size)        { _compressed_line_number_table_size = size; }
 
   methodHandle method() const               { return _method; }
-  void set_method(methodHandle method)      { _method = method; }
+  void set_method(const methodHandle& method)      { _method = method; }
 
   // This will return a raw bytecode, which is possibly rewritten.
   Bytecodes::Code code_at(int bci) const          { return (Bytecodes::Code) code_array()[bci]; }
--- a/hotspot/src/share/vm/runtime/safepoint.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -122,7 +122,7 @@
 
   // Save the starting time, so that it can be compared to see if this has taken
   // too long to complete.
-  jlong safepoint_limit_time;
+  jlong safepoint_limit_time = 0;
   timeout_error_printed = false;
 
   // PrintSafepointStatisticsTimeout can be specified separately. When
@@ -901,7 +901,7 @@
 
 
 void ThreadSafepointState::print_on(outputStream *st) const {
-  const char *s;
+  const char *s = NULL;
 
   switch(_type) {
     case _running                : s = "_running";              break;
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1142,7 +1142,7 @@
            callee->is_method_handle_intrinsic() ||
            callee->is_compiled_lambda_form(),
            "actual receiver must be subclass of static receiver klass");
-    if (receiver_klass->oop_is_instance()) {
+    if (receiver_klass->is_instance_klass()) {
       if (InstanceKlass::cast(receiver_klass())->is_not_initialized()) {
         tty->print_cr("ERROR: Klass not yet initialized!!");
         receiver_klass()->print();
@@ -1697,7 +1697,7 @@
 }
 
 #ifdef ASSERT
-void SharedRuntime::check_member_name_argument_is_last_argument(methodHandle method,
+void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
                                                                 const BasicType* sig_bt,
                                                                 const VMRegPair* regs) {
   ResourceMark rm;
@@ -2430,7 +2430,7 @@
   return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 }
 
-AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
+AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
   // Use customized signature handler.  Need to lock around updates to
   // the AdapterHandlerTable (it is not safe for concurrent readers
   // and a single writer: this could be fixed if it becomes a
@@ -2640,7 +2640,7 @@
  * arguments, and transitions to native.  On return from the native we transition
  * back to java blocking if a safepoint is in progress.
  */
-void AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
+void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
   ResourceMark rm;
   nmethod* nm = NULL;
 
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -361,7 +361,7 @@
   // return value is the maximum number of VMReg stack slots the convention will use.
   static int java_calling_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed, int is_outgoing);
 
-  static void check_member_name_argument_is_last_argument(methodHandle method,
+  static void check_member_name_argument_is_last_argument(const methodHandle& method,
                                                           const BasicType* sig_bt,
                                                           const VMRegPair* regs) NOT_DEBUG_RETURN;
 
@@ -472,7 +472,7 @@
   // is a JNI critical method, or a compiled method handle adapter,
   // such as _invokeBasic, _linkToVirtual, etc.
   static nmethod* generate_native_wrapper(MacroAssembler* masm,
-                                          methodHandle method,
+                                          const methodHandle& method,
                                           int compile_id,
                                           BasicType* sig_bt,
                                           VMRegPair* regs,
@@ -680,8 +680,8 @@
 
   static AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint,
                                         address i2c_entry, address c2i_entry, address c2i_unverified_entry);
-  static void create_native_wrapper(methodHandle method);
-  static AdapterHandlerEntry* get_adapter(methodHandle method);
+  static void create_native_wrapper(const methodHandle& method);
+  static AdapterHandlerEntry* get_adapter(const methodHandle& method);
 
   static void print_handler(const CodeBlob* b) { print_handler_on(tty, b); }
   static void print_handler_on(outputStream* st, const CodeBlob* b);
--- a/hotspot/src/share/vm/runtime/signature.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/signature.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -242,7 +242,7 @@
 
   void do_void()    { ShouldNotReachHere(); }
 
-  Fingerprinter(methodHandle method) : SignatureIterator(method->signature()) {
+  Fingerprinter(const methodHandle& method) : SignatureIterator(method->signature()) {
     mh = method;
     _fingerprint = 0;
   }
@@ -320,7 +320,7 @@
   virtual void pass_double()           { pass_long(); }  // may be same as long
 #endif
 
-  NativeSignatureIterator(methodHandle method) : SignatureIterator(method->signature()) {
+  NativeSignatureIterator(const methodHandle& method) : SignatureIterator(method->signature()) {
     _method = method;
     _offset = 0;
     _jni_offset = 0;
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -185,7 +185,7 @@
   }
 }
 
-nmethod* SimpleThresholdPolicy::event(methodHandle method, methodHandle inlinee,
+nmethod* SimpleThresholdPolicy::event(const methodHandle& method, const methodHandle& inlinee,
                                       int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) {
   if (comp_level == CompLevel_none &&
       JvmtiExport::can_post_interpreter_events() &&
@@ -222,7 +222,7 @@
 }
 
 // Check if the method can be compiled, change level if necessary
-void SimpleThresholdPolicy::compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
+void SimpleThresholdPolicy::compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
   assert(level <= TieredStopAtLevel, "Invalid compilation level");
   if (level == CompLevel_none) {
     return;
@@ -249,7 +249,7 @@
 }
 
 // Tell the broker to compile the method
-void SimpleThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
+void SimpleThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
   int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
   CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
 }
@@ -377,7 +377,7 @@
 
 
 // Handle the invocation event.
-void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
+void SimpleThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
                                               CompLevel level, nmethod* nm, JavaThread* thread) {
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
     CompLevel next_level = call_event(mh(), level);
@@ -389,7 +389,7 @@
 
 // Handle the back branch event. Notice that we can compile the method
 // with a regular entry from here.
-void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
+void SimpleThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
                                                      int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
   // If the method is already compiling, quickly bail out.
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,9 +67,9 @@
   // Print policy-specific information if necessary
   virtual void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level) { }
   // Check if the method can be compiled, change level if necessary
-  void compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread);
+  void compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread);
   // Submit a given method for compilation
-  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread);
+  virtual void submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread);
   // Simple methods are as good being compiled with C1 as C2.
   // This function tells if it's such a function.
   inline bool is_trivial(Method* method);
@@ -87,9 +87,9 @@
     }
     return CompLevel_none;
   }
-  virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
+  virtual void method_invocation_event(const methodHandle& method, const methodHandle& inlinee,
                                        CompLevel level, nmethod* nm, JavaThread* thread);
-  virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
+  virtual void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee,
                                         int bci, CompLevel level, nmethod* nm, JavaThread* thread);
 public:
   SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { }
@@ -103,7 +103,7 @@
   virtual void delay_compilation(Method* method) { }
   virtual void disable_compilation(Method* method) { }
   virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
-  virtual nmethod* event(methodHandle method, methodHandle inlinee,
+  virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee,
                          int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread);
   // Select task is called by CompileBroker. We should return a task or NULL.
   virtual CompileTask* select_task(CompileQueue* compile_queue);
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -116,7 +116,7 @@
 // global list of blocks of monitors
 // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
 // want to expose the PaddedEnd template more than necessary.
-ObjectMonitor * ObjectSynchronizer::gBlockList = NULL;
+ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
 // global monitor free list
 ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
 // global monitor in-use list, for moribund threads,
@@ -890,21 +890,22 @@
 
   return NULL;
 }
+
 // Visitors ...
 
 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
-  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
-  ObjectMonitor* mid;
-  while (block) {
+  PaddedEnd<ObjectMonitor> * block =
+    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
+  while (block != NULL) {
     assert(block->object() == CHAINMARKER, "must be a block header");
     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
-      mid = (ObjectMonitor *)(block + i);
-      oop object = (oop) mid->object();
+      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
+      oop object = (oop)mid->object();
       if (object != NULL) {
         closure->do_monitor(mid);
       }
     }
-    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
+    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
   }
 }
 
@@ -919,9 +920,9 @@
 
 void ObjectSynchronizer::oops_do(OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  for (PaddedEnd<ObjectMonitor> * block =
-       (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
-       block = (PaddedEnd<ObjectMonitor> *)next(block)) {
+  PaddedEnd<ObjectMonitor> * block =
+    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
+  for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
     assert(block->object() == CHAINMARKER, "must be a block header");
     for (int i = 1; i < _BLOCKSIZE; i++) {
       ObjectMonitor* mid = (ObjectMonitor *)&block[i];
@@ -1139,7 +1140,9 @@
     // The very first objectMonitor in a block is reserved and dedicated.
     // It serves as blocklist "next" linkage.
     temp[0].FreeNext = gBlockList;
-    gBlockList = temp;
+    // There are lock-free uses of gBlockList so make sure that
+    // the previous stores happen before we update gBlockList.
+    OrderAccess::release_store_ptr(&gBlockList, temp);
 
     // Add the new string of objectMonitors to the global free list
     temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
@@ -1621,31 +1624,33 @@
       nInuse += gOmInUseCount;
     }
 
-  } else for (PaddedEnd<ObjectMonitor> * block =
-              (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
-              block = (PaddedEnd<ObjectMonitor> *)next(block)) {
-    // Iterate over all extant monitors - Scavenge all idle monitors.
-    assert(block->object() == CHAINMARKER, "must be a block header");
-    nInCirculation += _BLOCKSIZE;
-    for (int i = 1; i < _BLOCKSIZE; i++) {
-      ObjectMonitor* mid = (ObjectMonitor*)&block[i];
-      oop obj = (oop) mid->object();
+  } else {
+    PaddedEnd<ObjectMonitor> * block =
+      (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
+    for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
+      // Iterate over all extant monitors - Scavenge all idle monitors.
+      assert(block->object() == CHAINMARKER, "must be a block header");
+      nInCirculation += _BLOCKSIZE;
+      for (int i = 1; i < _BLOCKSIZE; i++) {
+        ObjectMonitor* mid = (ObjectMonitor*)&block[i];
+        oop obj = (oop)mid->object();
 
-      if (obj == NULL) {
-        // The monitor is not associated with an object.
-        // The monitor should either be a thread-specific private
-        // free list or the global free list.
-        // obj == NULL IMPLIES mid->is_busy() == 0
-        guarantee(!mid->is_busy(), "invariant");
-        continue;
-      }
-      deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);
+        if (obj == NULL) {
+          // The monitor is not associated with an object.
+          // The monitor should either be a thread-specific private
+          // free list or the global free list.
+          // obj == NULL IMPLIES mid->is_busy() == 0
+          guarantee(!mid->is_busy(), "invariant");
+          continue;
+        }
+        deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);
 
-      if (deflated) {
-        mid->FreeNext = NULL;
-        nScavenged++;
-      } else {
-        nInuse++;
+        if (deflated) {
+          mid->FreeNext = NULL;
+          nScavenged++;
+        } else {
+          nInuse++;
+        }
       }
     }
   }
@@ -1789,18 +1794,18 @@
 
 // Verify all monitors in the monitor cache, the verification is weak.
 void ObjectSynchronizer::verify() {
-  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
-  ObjectMonitor* mid;
-  while (block) {
+  PaddedEnd<ObjectMonitor> * block =
+    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
+  while (block != NULL) {
     assert(block->object() == CHAINMARKER, "must be a block header");
     for (int i = 1; i < _BLOCKSIZE; i++) {
-      mid = (ObjectMonitor *)(block + i);
-      oop object = (oop) mid->object();
+      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
+      oop object = (oop)mid->object();
       if (object != NULL) {
         mid->verify();
       }
     }
-    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
+    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
   }
 }
 
@@ -1809,19 +1814,19 @@
 // the list of extant blocks without taking a lock.
 
 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
-  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
-
-  while (block) {
+  PaddedEnd<ObjectMonitor> * block =
+    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
+  while (block != NULL) {
     assert(block->object() == CHAINMARKER, "must be a block header");
     if (monitor > (ObjectMonitor *)&block[0] &&
         monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
-      address mon = (address) monitor;
-      address blk = (address) block;
+      address mon = (address)monitor;
+      address blk = (address)block;
       size_t diff = mon - blk;
-      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "check");
+      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
       return 1;
     }
-    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
+    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
   }
   return 0;
 }
--- a/hotspot/src/share/vm/runtime/synchronizer.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/synchronizer.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -140,7 +140,7 @@
   // global list of blocks of monitors
   // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
   // want to expose the PaddedEnd template more than necessary.
-  static ObjectMonitor * gBlockList;
+  static ObjectMonitor * volatile gBlockList;
   // global monitor free list
   static ObjectMonitor * volatile gFreeList;
   // global monitor in-use list, for moribund threads,
--- a/hotspot/src/share/vm/runtime/thread.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -2101,7 +2101,7 @@
           frame f = last_frame();
           tty->print(" (pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " )", p2i(f.pc()), p2i(f.sp()));
         }
-        tty->print_cr(" of type: %s", InstanceKlass::cast(_pending_async_exception->klass())->external_name());
+        tty->print_cr(" of type: %s", _pending_async_exception->klass()->external_name());
       }
       _pending_async_exception = NULL;
       clear_has_async_exception();
@@ -2219,10 +2219,10 @@
 
       if (TraceExceptions) {
         ResourceMark rm;
-        tty->print_cr("Pending Async. exception installed of type: %s", InstanceKlass::cast(_pending_async_exception->klass())->external_name());
+        tty->print_cr("Pending Async. exception installed of type: %s", _pending_async_exception->klass()->external_name());
       }
       // for AbortVMOnException flag
-      Exceptions::debug_check_abort(InstanceKlass::cast(_pending_async_exception->klass())->external_name());
+      Exceptions::debug_check_abort(_pending_async_exception->klass()->external_name());
     }
   }
 
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -284,6 +284,7 @@
 
 #define VM_STRUCTS(nonstatic_field, \
                    static_field, \
+                   static_ptr_volatile_field, \
                    unchecked_nonstatic_field, \
                    volatile_nonstatic_field, \
                    nonproduct_nonstatic_field, \
@@ -687,42 +688,42 @@
      static_field(SystemDictionary,            _shared_dictionary,                            Dictionary*)                           \
      static_field(SystemDictionary,            _system_loader_lock_obj,                       oop)                                   \
      static_field(SystemDictionary,            _loader_constraints,                           LoaderConstraintTable*)                \
-     static_field(SystemDictionary,            WK_KLASS(Object_klass),                        Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(String_klass),                        Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(Class_klass),                         Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(Cloneable_klass),                     Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(ClassLoader_klass),                   Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(Serializable_klass),                  Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(System_klass),                        Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(Throwable_klass),                     Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(ThreadDeath_klass),                   Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(Error_klass),                         Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(Exception_klass),                     Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(RuntimeException_klass),              Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(ClassNotFoundException_klass),        Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(NoClassDefFoundError_klass),          Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(LinkageError_klass),                  Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(ClassCastException_klass),            Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(ArrayStoreException_klass),           Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(VirtualMachineError_klass),           Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(OutOfMemoryError_klass),              Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(StackOverflowError_klass),            Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(ProtectionDomain_klass),              Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(AccessControlContext_klass),          Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(SecureClassLoader_klass),             Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(Reference_klass),                     Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(SoftReference_klass),                 Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(WeakReference_klass),                 Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(FinalReference_klass),                Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(PhantomReference_klass),              Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(Cleaner_klass),                       Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(Finalizer_klass),                     Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(Thread_klass),                        Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(ThreadGroup_klass),                   Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(Properties_klass),                    Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(StringBuffer_klass),                  Klass*)                                \
-     static_field(SystemDictionary,            WK_KLASS(MethodHandle_klass),                  Klass*)                                \
-     static_field(SystemDictionary,            _box_klasses[0],                               Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(Object_klass),                        InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(String_klass),                        InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(Class_klass),                         InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(Cloneable_klass),                     InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(ClassLoader_klass),                   InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(Serializable_klass),                  InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(System_klass),                        InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(Throwable_klass),                     InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(ThreadDeath_klass),                   InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(Error_klass),                         InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(Exception_klass),                     InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(RuntimeException_klass),              InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(ClassNotFoundException_klass),        InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(NoClassDefFoundError_klass),          InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(LinkageError_klass),                  InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(ClassCastException_klass),            InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(ArrayStoreException_klass),           InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(VirtualMachineError_klass),           InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(OutOfMemoryError_klass),              InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(StackOverflowError_klass),            InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(ProtectionDomain_klass),              InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(AccessControlContext_klass),          InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(SecureClassLoader_klass),             InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(Reference_klass),                     InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(SoftReference_klass),                 InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(WeakReference_klass),                 InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(FinalReference_klass),                InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(PhantomReference_klass),              InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(Cleaner_klass),                       InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(Finalizer_klass),                     InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(Thread_klass),                        InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(ThreadGroup_klass),                   InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(Properties_klass),                    InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(StringBuffer_klass),                  InstanceKlass*)                        \
+     static_field(SystemDictionary,            WK_KLASS(MethodHandle_klass),                  InstanceKlass*)                        \
+     static_field(SystemDictionary,            _box_klasses[0],                               InstanceKlass*)                        \
      static_field(SystemDictionary,            _java_system_loader,                           oop)                                   \
                                                                                                                                      \
   /*************/                                                                                                                    \
@@ -1183,7 +1184,7 @@
   volatile_nonstatic_field(BasicLock,          _displaced_header,                             markOop)                               \
   nonstatic_field(BasicObjectLock,             _lock,                                         BasicLock)                             \
   nonstatic_field(BasicObjectLock,             _obj,                                          oop)                                   \
-     static_field(ObjectSynchronizer,          gBlockList,                                    ObjectMonitor*)                        \
+  static_ptr_volatile_field(ObjectSynchronizer, gBlockList,                                   ObjectMonitor*)                        \
                                                                                                                                      \
   /*********************/                                                                                                            \
   /* Matcher (C2 only) */                                                                                                            \
@@ -2902,6 +2903,11 @@
 #define GENERATE_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, type)                 \
  { QUOTE(typeName), QUOTE(fieldName), QUOTE(type), 1, 0, &typeName::fieldName },
 
+// This macro generates a VMStructEntry line for a static pointer volatile field,
+// e.g.: "static ObjectMonitor * volatile gBlockList;"
+#define GENERATE_STATIC_PTR_VOLATILE_VM_STRUCT_ENTRY(typeName, fieldName, type)    \
+ { QUOTE(typeName), QUOTE(fieldName), QUOTE(type), 1, 0, (void *)&typeName::fieldName },
+
 // This macro generates a VMStructEntry line for an unchecked
 // nonstatic field, in which the size of the type is also specified.
 // The type string is given as NULL, indicating an "opaque" type.
@@ -2927,10 +2933,15 @@
 #define CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type)        \
  {typedef type dummyvtype; typeName *dummyObj = NULL; volatile dummyvtype* dummy = &dummyObj->fieldName; }
 
-// This macro checks the type of a VMStructEntry by comparing pointer types
+// This macro checks the type of a static VMStructEntry by comparing pointer types
 #define CHECK_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, type)                    \
  {type* dummy = &typeName::fieldName; }
 
+// This macro checks the type of a static pointer volatile VMStructEntry by comparing pointer types,
+// e.g.: "static ObjectMonitor * volatile gBlockList;"
+#define CHECK_STATIC_PTR_VOLATILE_VM_STRUCT_ENTRY(typeName, fieldName, type)       \
+ {type volatile * dummy = &typeName::fieldName; }
+
 // This macro ensures the type of a field and its containing type are
 // present in the type table. The assertion string is shorter than
 // preferable because (incredibly) of a bug in Solstice NFS client
@@ -3141,6 +3152,7 @@
 
   VM_STRUCTS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
              GENERATE_STATIC_VM_STRUCT_ENTRY,
+             GENERATE_STATIC_PTR_VOLATILE_VM_STRUCT_ENTRY,
              GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY,
              GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
              GENERATE_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3370,6 +3382,7 @@
 VMStructs::init() {
   VM_STRUCTS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
              CHECK_STATIC_VM_STRUCT_ENTRY,
+             CHECK_STATIC_PTR_VOLATILE_VM_STRUCT_ENTRY,
              CHECK_NO_OP,
              CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
              CHECK_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3491,9 +3504,11 @@
                         CHECK_NO_OP,
                         CHECK_NO_OP,
                         CHECK_NO_OP,
+                        CHECK_NO_OP,
                         CHECK_NO_OP));
   debug_only(VM_STRUCTS(CHECK_NO_OP,
                         ENSURE_FIELD_TYPE_PRESENT,
+                        ENSURE_FIELD_TYPE_PRESENT,
                         CHECK_NO_OP,
                         ENSURE_FIELD_TYPE_PRESENT,
                         ENSURE_NONPRODUCT_FIELD_TYPE_PRESENT,
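
The new static_ptr_volatile_field macro pair above exists because the plain static-field check ("type* dummy = &typeName::fieldName") cannot type-check a "static T * volatile" member such as ObjectSynchronizer::gBlockList. A minimal standalone sketch of the idea, with invented names (Sync, gList, gFree, MyEntry) standing in for ObjectSynchronizer, gBlockList, gFreeList and VMStructEntry; this is an illustration, not HotSpot code:

#include <cstddef>

struct Monitor {};

struct Sync {
  static Monitor* volatile gList;   // analogous to "static ObjectMonitor * volatile gBlockList"
  static Monitor*          gFree;   // analogous to a plain static pointer field
};
Monitor* volatile Sync::gList = NULL;
Monitor*          Sync::gFree = NULL;

struct MyEntry { const char* type_name; const char* field_name; void* address; };

int main() {
  // Plain static check: pointee types match exactly, so this compiles.
  Monitor** dummy_plain = &Sync::gFree;
  // For the volatile pointer the check needs "Monitor* volatile *",
  // which is what CHECK_STATIC_PTR_VOLATILE_VM_STRUCT_ENTRY spells out.
  Monitor* volatile* dummy_volatile = &Sync::gList;
  // The generate macro records the address with an explicit cast,
  // mirroring "(void *)&typeName::fieldName" in the hunk above.
  MyEntry e = { "Sync", "gList", (void*)&Sync::gList };
  (void)dummy_plain; (void)dummy_volatile; (void)e;
  return 0;
}
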
--- a/hotspot/src/share/vm/services/classLoadingService.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/services/classLoadingService.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -164,7 +164,7 @@
 
   class_size += k->size();
 
-  if (k->oop_is_instance()) {
+  if (k->is_instance_klass()) {
     class_size += k->methods()->size();
     // FIXME: Need to count the contents of methods
     class_size += k->constants()->size();
--- a/hotspot/src/share/vm/services/heapDumper.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/services/heapDumper.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -722,7 +722,7 @@
         o = oopDesc::load_decode_heap_oop((oop*)addr);
       }
 
-      // reflection and sun.misc.Unsafe classes may have a reference to a
+      // reflection and Unsafe classes may have a reference to a
       // Klass* so filter it out.
       assert(o->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(o));
       writer->write_objectID(o);
@@ -895,9 +895,7 @@
 // creates HPROF_GC_CLASS_DUMP record for the given class and each of
 // its array classes
 void DumperSupport::dump_class_and_array_classes(DumpWriter* writer, Klass* k) {
-  Klass* klass = k;
-  assert(klass->oop_is_instance(), "not an InstanceKlass");
-  InstanceKlass* ik = (InstanceKlass*)klass;
+  InstanceKlass* ik = InstanceKlass::cast(k);
 
   // Ignore the class if it hasn't been initialized yet
   if (!ik->is_linked()) {
@@ -939,10 +937,10 @@
   dump_instance_field_descriptors(writer, k);
 
   // array classes
-  k = klass->array_klass_or_null();
+  k = k->array_klass_or_null();
   while (k != NULL) {
     Klass* klass = k;
-    assert(klass->oop_is_objArray(), "not an ObjArrayKlass");
+    assert(klass->is_objArray_klass(), "not an ObjArrayKlass");
 
     writer->write_u1(HPROF_GC_CLASS_DUMP);
     writer->write_classID(klass);
@@ -1126,7 +1124,7 @@
   writer->write_symbolID(m->name());                // method's name
   writer->write_symbolID(m->signature());           // method's signature
 
-  assert(m->method_holder()->oop_is_instance(), "not InstanceKlass");
+  assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
   writer->write_symbolID(m->method_holder()->source_file_name());  // source file name
   writer->write_u4(class_serial_num);               // class serial number
   writer->write_u4((u4) line_number);               // line number
@@ -1248,7 +1246,7 @@
     _writer = writer;
   }
   void do_klass(Klass* k) {
-    if (k->oop_is_instance()) {
+    if (k->is_instance_klass()) {
       InstanceKlass* ik = InstanceKlass::cast(k);
         writer()->write_u1(HPROF_GC_ROOT_STICKY_CLASS);
         writer()->write_classID(ik);
@@ -1396,7 +1394,7 @@
     if (oome) {
       assert(!Thread::current()->is_VM_thread(), "Dump from OutOfMemoryError cannot be called by the VMThread");
       // get OutOfMemoryError zero-parameter constructor
-      InstanceKlass* oome_ik = InstanceKlass::cast(SystemDictionary::OutOfMemoryError_klass());
+      InstanceKlass* oome_ik = SystemDictionary::OutOfMemoryError_klass();
       _oome_constructor = oome_ik->find_method(vmSymbols::object_initializer_name(),
                                                           vmSymbols::void_method_signature());
       // get thread throwing OOME when generating the heap dump at OOME
@@ -1553,7 +1551,7 @@
 
 // writes a HPROF_GC_CLASS_DUMP record for the given class
 void VM_HeapDumper::do_class_dump(Klass* k) {
-  if (k->oop_is_instance()) {
+  if (k->is_instance_klass()) {
     DumperSupport::dump_class_and_array_classes(writer(), k);
   }
 }
@@ -1850,7 +1848,6 @@
 }
 
 // dump the heap to given path.
-PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL
 int HeapDumper::dump(const char* path) {
   assert(path != NULL && strlen(path) > 0, "path missing");
 
@@ -1888,13 +1885,8 @@
   if (print_to_tty()) {
     timer()->stop();
     if (error() == NULL) {
-      char msg[256];
-      sprintf(msg, "Heap dump file created [%s bytes in %3.3f secs]",
-        JLONG_FORMAT, timer()->seconds());
-PRAGMA_DIAG_PUSH
-PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
-      tty->print_cr(msg, writer.bytes_written());
-PRAGMA_DIAG_POP
+      tty->print_cr("Heap dump file created [" JLONG_FORMAT " bytes in %3.3f secs]",
+                    writer.bytes_written(), timer()->seconds());
     } else {
       tty->print_cr("Dump file is incomplete: %s", writer.error());
     }
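
The heapDumper.cpp hunk above replaces a runtime sprintf of the format string (which needed the nonliteral-format pragmas) with compile-time literal concatenation, so the compiler can still verify the arguments. A minimal sketch of that pattern, with MY_JLONG_FORMAT as a stand-in for HotSpot's JLONG_FORMAT:

#include <cstdio>
#include <cinttypes>

#define MY_JLONG_FORMAT "%" PRId64   // stand-in for JLONG_FORMAT

int main() {
  int64_t bytes_written = 123456789;
  double  seconds       = 1.234;
  // Adjacent string literals concatenate at compile time, so the whole
  // format remains a literal and -Wformat-nonliteral never applies.
  printf("Heap dump file created [" MY_JLONG_FORMAT " bytes in %3.3f secs]\n",
         bytes_written, seconds);
  return 0;
}
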
--- a/hotspot/src/share/vm/services/serviceUtil.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/services/serviceUtil.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,13 +57,13 @@
       if (k->is_klass()) {
         // if it's a class for an object, an object array, or
         // primitive (type) array then it's visible.
-        if (k->oop_is_instance()) {
+        if (k->is_instance_klass()) {
           return true;
         }
-        if (k->oop_is_objArray()) {
+        if (k->is_objArray_klass()) {
           return true;
         }
-        if (k->oop_is_typeArray()) {
+        if (k->is_typeArray_klass()) {
           return true;
         }
       }
--- a/hotspot/src/share/vm/services/threadService.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/services/threadService.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -505,8 +505,7 @@
   int len = (_locked_monitors != NULL ? _locked_monitors->length() : 0);
   for (int i = 0; i < len; i++) {
     oop o = _locked_monitors->at(i);
-    InstanceKlass* ik = InstanceKlass::cast(o->klass());
-    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), ik->external_name());
+    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
   }
 
 }
@@ -729,8 +728,7 @@
 
   for (int i = 0; i < locks->length(); i++) {
     instanceOop obj = locks->at(i);
-    InstanceKlass* ik = InstanceKlass::cast(obj->klass());
-    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), ik->external_name());
+    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
   }
   st->cr();
 }
@@ -887,7 +885,7 @@
       oop obj = (oop)waitingToLockMonitor->object();
       if (obj != NULL) {
         st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
-                   (InstanceKlass::cast(obj->klass()))->external_name());
+                   obj->klass()->external_name());
 
         if (!currentThread->current_pending_monitor_is_from_java()) {
           owner_desc = "\n  in JNI, which is held by";
@@ -911,7 +909,7 @@
     } else {
       st->print("  waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
                 p2i(waitingToLockBlocker),
-                (InstanceKlass::cast(waitingToLockBlocker->klass()))->external_name());
+                waitingToLockBlocker->klass()->external_name());
       assert(waitingToLockBlocker->is_a(SystemDictionary::abstract_ownable_synchronizer_klass()),
              "Must be an AbstractOwnableSynchronizer");
       oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
--- a/hotspot/src/share/vm/services/threadService.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/services/threadService.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -425,12 +425,12 @@
   }
 
   JavaThreadStatusChanger(JavaThread* java_thread,
-                          java_lang_Thread::ThreadStatus state) {
+                          java_lang_Thread::ThreadStatus state) : _old_state(java_lang_Thread::NEW) {
     save_old_state(java_thread);
     set_thread_status(state);
   }
 
-  JavaThreadStatusChanger(JavaThread* java_thread) {
+  JavaThreadStatusChanger(JavaThread* java_thread) : _old_state(java_lang_Thread::NEW) {
     save_old_state(java_thread);
   }
 
@@ -527,7 +527,7 @@
   // Current thread is the notifying thread which holds the monitor.
   static bool wait_reenter_begin(JavaThread *java_thread, ObjectMonitor *obj_m) {
     assert((java_thread != NULL), "Java thread should not be null here");
-    bool active  = false;
+    bool active = false;
     if (is_alive(java_thread) && ServiceUtil::visible_oop((oop)obj_m->object())) {
       active = contended_enter_begin(java_thread);
     }
@@ -542,7 +542,7 @@
   }
 
   JavaThreadBlockedOnMonitorEnterState(JavaThread *java_thread, ObjectMonitor *obj_m) :
-    JavaThreadStatusChanger(java_thread) {
+    _stat(NULL), _active(false), JavaThreadStatusChanger(java_thread) {
     assert((java_thread != NULL), "Java thread should not be null here");
     // Change thread status and collect contended enter stats for monitor contended
     // enter done for external java world objects and it is contended. All other cases
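
The threadService.hpp hunks above give _old_state, _stat and _active explicit values in the constructor initializer lists, so helpers called from the constructor body never read an indeterminate field. A minimal sketch of the pattern with invented names (StatusChanger, BlockedState); note that the base class is always constructed before the members, whatever order the initializers are written in:

#include <cstddef>

struct StatusChanger {
  int _old_state;
  explicit StatusChanger(int initial) : _old_state(initial) {}
};

struct BlockedState : public StatusChanger {
  void* _stat;
  bool  _active;
  // Every member gets a value before the constructor body runs.
  explicit BlockedState(int initial)
    : StatusChanger(initial), _stat(NULL), _active(false) {}
};

int main() {
  BlockedState s(0);
  return s._active ? 1 : 0;
}
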
--- a/hotspot/src/share/vm/services/writeableFlags.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/services/writeableFlags.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -57,7 +57,6 @@
   }
 }
 
-PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL
 static void print_flag_error_message_if_needed(Flag::Error error, const char* name, FormatBuffer<80>& err_msg) {
   if (error == Flag::SUCCESS) {
     return;
--- a/hotspot/src/share/vm/shark/sharkCompiler.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/shark/sharkCompiler.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -222,7 +222,7 @@
 }
 
 nmethod* SharkCompiler::generate_native_wrapper(MacroAssembler* masm,
-                                                methodHandle    target,
+                                                const methodHandle& target,
                                                 int             compile_id,
                                                 BasicType*      arg_types,
                                                 BasicType       return_type) {
--- a/hotspot/src/share/vm/shark/sharkCompiler.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/shark/sharkCompiler.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -46,7 +46,7 @@
   // Missing feature tests
   bool supports_native() { return true; }
   bool supports_osr()    { return true; }
-  bool can_compile_method(methodHandle method)  {
+  bool can_compile_method(const methodHandle& method)  {
     return ! (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form());
   }
 
@@ -61,7 +61,7 @@
 
   // Generate a wrapper for a native (JNI) method
   nmethod* generate_native_wrapper(MacroAssembler* masm,
-                                   methodHandle    target,
+                                   const methodHandle& target,
                                    int             compile_id,
                                    BasicType*      arg_types,
                                    BasicType       return_type);
--- a/hotspot/src/share/vm/shark/sharkIntrinsics.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/shark/sharkIntrinsics.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -65,7 +65,7 @@
   case vmIntrinsics::_currentThread:
     return true;
 
-    // sun.misc.Unsafe
+    // Unsafe
   case vmIntrinsics::_compareAndSwapInt:
     return true;
 
@@ -139,7 +139,7 @@
     do_Thread_currentThread();
     break;
 
-    // sun.misc.Unsafe
+    // Unsafe
   case vmIntrinsics::_compareAndSwapInt:
     do_Unsafe_compareAndSwapInt();
     break;
--- a/hotspot/src/share/vm/utilities/exceptions.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/utilities/exceptions.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -222,7 +222,7 @@
 }
 
 
-void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file, int line, methodHandle method) {
+void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file, int line, const methodHandle& method) {
   Handle exception;
   if (!THREAD->has_pending_exception()) {
     Klass* k = SystemDictionary::StackOverflowError_klass();
@@ -502,5 +502,5 @@
       message = java_lang_String::as_utf8_string(msg);
     }
   }
-  debug_check_abort(InstanceKlass::cast(exception()->klass())->external_name(), message);
+  debug_check_abort(exception()->klass()->external_name(), message);
 }
--- a/hotspot/src/share/vm/utilities/exceptions.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/utilities/exceptions.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -163,7 +163,7 @@
                               const char* message,
                               ExceptionMsgToUtf8Mode to_utf8_safe = safe_to_utf8);
 
-  static void throw_stack_overflow_exception(Thread* thread, const char* file, int line, methodHandle method);
+  static void throw_stack_overflow_exception(Thread* thread, const char* file, int line, const methodHandle& method);
 
   // Exception counting for error files of interesting exceptions that may have
   // caused a problem for the jvm
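
Several signatures above (SharkCompiler::generate_native_wrapper, can_compile_method, Exceptions::throw_stack_overflow_exception) now take the methodHandle by const reference instead of by value, avoiding a copy of the wrapper on every call. A minimal sketch of the difference, with FakeHandle standing in for methodHandle; names are invented for illustration:

struct Method {};

struct FakeHandle {
  Method* _value;
  explicit FakeHandle(Method* m) : _value(m) {}
};

// Before: "void use_method(FakeHandle target)" constructed a new handle per call.
// After: the caller's handle is reused and cannot be modified by the callee.
static void use_method(const FakeHandle& target) {
  (void)target;
}

int main() {
  Method m;
  FakeHandle h(&m);
  use_method(h);
  return 0;
}
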
--- a/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp	Fri Oct 30 00:02:37 2015 +0100
@@ -287,8 +287,6 @@
 // Tested to work with clang version 3.1 and better.
 #define PRAGMA_DIAG_PUSH             _Pragma("GCC diagnostic push")
 #define PRAGMA_DIAG_POP              _Pragma("GCC diagnostic pop")
-#define PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL
-#define PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL PRAGMA_FORMAT_NONLITERAL_IGNORED
 
 // Hack to deal with gcc yammering about non-security format stuff
 #else
@@ -297,8 +295,6 @@
 // versions of the macro-pragma to obtain better checking with newer compilers.
 #define PRAGMA_DIAG_PUSH
 #define PRAGMA_DIAG_POP
-#define PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL PRAGMA_FORMAT_NONLITERAL_IGNORED
-#define PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
 #endif
 
 #if (__GNUC__ == 2) && (__GNUC_MINOR__ < 95)
--- a/hotspot/src/share/vm/utilities/xmlstream.cpp	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/src/share/vm/utilities/xmlstream.cpp	Fri Oct 30 00:02:37 2015 +0100
@@ -340,6 +340,7 @@
   print_raw_cr(">");
 }
 
+// If you remove the PRAGMA, this fails to compile with clang-503.0.40.
 PRAGMA_DIAG_PUSH
 PRAGMA_FORMAT_NONLITERAL_IGNORED
 // ------------------------------------------------------------------
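
The xmlstream.cpp comment above notes that the PRAGMA_DIAG_PUSH / PRAGMA_FORMAT_NONLITERAL_IGNORED pair is still needed for clang. A minimal sketch of what such a guarded region looks like around a function that forwards a caller-supplied (non-literal) format string; the pragma spellings are written out directly here, and log_line is an invented helper:

#include <cstdarg>
#include <cstdio>

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-nonliteral"
static void log_line(const char* format, ...) {
  va_list ap;
  va_start(ap, format);
  vprintf(format, ap);   // format is not a string literal here, hence the pragma
  va_end(ap);
}
#pragma GCC diagnostic pop

int main() {
  log_line("%s %d\n", "value:", 42);
  return 0;
}
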
--- a/hotspot/test/Makefile	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/test/Makefile	Fri Oct 30 00:02:37 2015 +0100
@@ -243,6 +243,9 @@
 	$(ECHO) "Running tests: $@"
 	$(MAKE) -j 1 TEST_SELECTION=":$@" UNIQUE_DIR=$@ jtreg_tests;
 
+hotspot_internal:
+	$(ALT_OUTPUTDIR)/jdk/bin/java -XX:+ExecuteInternalVMTests -XX:+ShowMessageBoxOnError -version
+
 # Prep for output
 prep: clean
 	@$(MKDIR) -p $(ABS_TEST_OUTPUT_DIR)
--- a/hotspot/test/compiler/jvmci/compilerToVM/GetConstantPoolTest.java	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/test/compiler/jvmci/compilerToVM/GetConstantPoolTest.java	Fri Oct 30 00:02:37 2015 +0100
@@ -27,6 +27,7 @@
  * @bug 8136421
  * @requires (os.simpleArch == "x64" | os.simpleArch == "sparcv9") & os.arch != "aarch64"
  * @library /testlibrary /../../test/lib /
+ * @ignore 8139385
  * @compile ../common/CompilerToVMHelper.java
  * @build sun.hotspot.WhiteBox
  *        compiler.jvmci.compilerToVM.GetConstantPoolTest
--- a/hotspot/test/compiler/membars/DekkerTest.java	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/test/compiler/membars/DekkerTest.java	Fri Oct 30 00:02:37 2015 +0100
@@ -25,9 +25,9 @@
  * @test
  * @bug 8007898
  * @summary Incorrect optimization of Memory Barriers in Matcher::post_store_load_barrier().
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest
  * @author Martin Doerr martin DOT doerr AT sap DOT com
  *
  * Run 3 times since the failure is intermittent.
--- a/hotspot/test/compiler/unsafe/UnsafeGetConstantField.java	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/test/compiler/unsafe/UnsafeGetConstantField.java	Fri Oct 30 00:02:37 2015 +0100
@@ -43,13 +43,13 @@
 import jdk.internal.org.objectweb.asm.*;
 import jdk.test.lib.Asserts;
 import jdk.test.lib.Utils;
-import sun.misc.Unsafe;
+import jdk.internal.misc.Unsafe;
 import static jdk.internal.org.objectweb.asm.Opcodes.*;
 
 public class UnsafeGetConstantField {
     static final Class<?> THIS_CLASS = UnsafeGetConstantField.class;
 
-    static final Unsafe U = Utils.getUnsafe();
+    static final Unsafe U = Unsafe.getUnsafe();
 
     public static void main(String[] args) {
         testUnsafeGetAddress();
--- a/hotspot/test/gc/g1/TestGCLogMessages.java	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/test/gc/g1/TestGCLogMessages.java	Fri Oct 30 00:02:37 2015 +0100
@@ -79,6 +79,7 @@
         // Misc Top-level
         new LogMessageWithLevel("Code Root Purge", Level.FINER),
         new LogMessageWithLevel("String Dedup Fixup", Level.FINER),
+        new LogMessageWithLevel("Expand Heap After Collection", Level.FINER),
         // Free CSet
         new LogMessageWithLevel("Young Free CSet", Level.FINEST),
         new LogMessageWithLevel("Non-Young Free CSet", Level.FINEST),
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/CommandLine/IgnoreUnrecognizedVMOptions.java	Fri Oct 30 00:02:37 2015 +0100
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import jdk.test.lib.*;
+
+/*
+ * @test
+ * @bug 8129855
+ * @summary -XX:+IgnoreUnrecognizedVMOptions should work according to the spec from JDK-8129855
+ *
+ * @library /testlibrary
+ * @modules java.base/sun.misc
+ *          java.management
+ * @run main IgnoreUnrecognizedVMOptions
+ */
+public class IgnoreUnrecognizedVMOptions {
+
+  private static void runJavaAndCheckExitValue(boolean shouldSucceed, String... args) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args);
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    if (shouldSucceed) {
+      output.shouldHaveExitValue(0);
+    } else {
+      output.shouldHaveExitValue(1);
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    boolean product = !Platform.isDebugBuild();
+
+    /*
+      #1.1 wrong value and non-existing flag:
+                                    exists, invalid value           does not exist
+                                    -XX:MinHeapFreeRatio=notnum     -XX:THIS_FLAG_DOESNT_EXIST
+      -IgnoreUnrecognizedVMOptions               ERR                           ERR
+      +IgnoreUnrecognizedVMOptions               ERR                           OK
+    */
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:MinHeapFreeRatio=notnum", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:THIS_FLAG_DOESNT_EXIST", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:MinHeapFreeRatio=notnum", "-version");
+    runJavaAndCheckExitValue(true, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:THIS_FLAG_DOESNT_EXIST", "-version");
+
+    /*
+      #1.2 normal flag with ranges:
+                                      exists, in range                exists, out of range
+                                      -XX:StackRedPages=1             -XX:StackRedPages=0
+      -IgnoreUnrecognizedVMOptions               OK                            ERR
+      +IgnoreUnrecognizedVMOptions               OK                            ERR
+    */
+    runJavaAndCheckExitValue(true, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:StackRedPages=1", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:StackRedPages=0", "-version");
+    runJavaAndCheckExitValue(true, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:StackRedPages=1", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:StackRedPages=0", "-version");
+
+    /*
+      #1.3 develop & notproduct flag on debug VM:
+                                      develop & !product_build        notproduct & !product_build
+                                      -XX:+DeoptimizeALot             -XX:+VerifyCodeCache
+      -IgnoreUnrecognizedVMOptions               OK                            OK
+      +IgnoreUnrecognizedVMOptions               OK                            OK
+    */
+    if (!product) {
+      runJavaAndCheckExitValue(true, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+DeoptimizeALot", "-version");
+      runJavaAndCheckExitValue(true, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+VerifyCodeCache", "-version");
+      runJavaAndCheckExitValue(true, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+DeoptimizeALot", "-version");
+      runJavaAndCheckExitValue(true, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+VerifyCodeCache", "-version");
+    }
+
+    /*
+      #1.4 develop & notproduct flag on product VM:
+                                    develop & !product_build           notproduct & product_build
+                                    -XX:+DeoptimizeALot                -XX:+VerifyCodeCache
+      -IgnoreUnrecognizedVMOptions               ERR                           ERR
+      +IgnoreUnrecognizedVMOptions               OK                            OK
+    */
+    if (product) {
+      runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+DeoptimizeALot", "-version");
+      runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+VerifyCodeCache", "-version");
+      runJavaAndCheckExitValue(true, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+DeoptimizeALot", "-version");
+      runJavaAndCheckExitValue(true, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+VerifyCodeCache", "-version");
+    }
+
+
+    /*
+      #1.5 malformed develop & notproduct flag on debug VM:
+                                  develop & !product_build             notproduct & !product_build
+                                  -XX:DeoptimizeALot                   -XX:VerifyCodeCache
+      -IgnoreUnrecognizedVMOptions               ERR                           ERR
+      +IgnoreUnrecognizedVMOptions               ERR                           ERR
+    */
+    if (!product) {
+      runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:DeoptimizeALot", "-version");
+      runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:VerifyCodeCache", "-version");
+      runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:DeoptimizeALot", "-version");
+      runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:VerifyCodeCache", "-version");
+    }
+
+    /*
+      #1.6 malformed develop & notproduct flag on product VM:
+                                    develop & !product_build           notproduct & product_build
+                                    -XX:DeoptimizeALot                 -XX:VerifyCodeCache
+      -IgnoreUnrecognizedVMOptions               ERR                           ERR
+      +IgnoreUnrecognizedVMOptions               OK                            OK
+    */
+    if (product) {
+      runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:DeoptimizeALot", "-version");
+      runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:VerifyCodeCache", "-version");
+      runJavaAndCheckExitValue(true, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:DeoptimizeALot", "-version");
+      runJavaAndCheckExitValue(true, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:VerifyCodeCache", "-version");
+    }
+
+    /*
+      #1.7 locked flag:
+                                      diagnostic & locked             experimental & locked             commercial & locked
+                                      -XX:-UnlockDiagnosticVMOptions  -XX:-UnlockExperimentalVMOptions  -XX:-UnlockCommercialFeatures
+                                      -XX:+PrintInlining              -XX:+AlwaysSafeConstructors       -XX:+FlightRecorder
+      -IgnoreUnrecognizedVMOptions               ERR                           ERR                                 ERR
+      +IgnoreUnrecognizedVMOptions               ERR                           ERR                                 ERR
+    */
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:-UnlockDiagnosticVMOptions", "-XX:+PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:-UnlockExperimentalVMOptions", "-XX:+AlwaysSafeConstructors", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:-UnlockCommercialFeatures", "-XX:+FlightRecorder", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-UnlockDiagnosticVMOptions", "-XX:+PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-UnlockExperimentalVMOptions", "-XX:+AlwaysSafeConstructors", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-UnlockCommercialFeatures", "-XX:+FlightRecorder", "-version");
+
+    /*
+      #1.8 malformed locked flag:
+                                    diagnostic & locked             experimental & locked             commercial & locked
+                                    -XX:-UnlockDiagnosticVMOptions  -XX:-UnlockExperimentalVMOptions  -XX:-UnlockCommercialFeatures
+                                    -XX:PrintInlining               -XX:AlwaysSafeConstructors        -XX:FlightRecorder
+      -IgnoreUnrecognizedVMOptions               ERR                           ERR                                 ERR
+      +IgnoreUnrecognizedVMOptions               ERR                           ERR                                 ERR
+    */
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:-UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:-UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:-UnlockCommercialFeatures", "-XX:FlightRecorder", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-UnlockCommercialFeatures", "-XX:FlightRecorder", "-version");
+
+    /*
+      #1.9 malformed unlocked flag:
+                                    diagnostic & locked             experimental & locked             commercial & locked
+                                    -XX:+UnlockDiagnosticVMOptions  -XX:+UnlockExperimentalVMOptions  -XX:+UnlockCommercialFeatures
+                                    -XX:PrintInlining               -XX:AlwaysSafeConstructors        -XX:FlightRecorder
+      -IgnoreUnrecognizedVMOptions               ERR                           ERR                                 ERR
+      +IgnoreUnrecognizedVMOptions               ERR                           ERR                                 ERR
+    */
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+UnlockCommercialFeatures", "-XX:FlightRecorder", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+UnlockCommercialFeatures", "-XX:FlightRecorder", "-version");
+  }
+}
--- a/hotspot/test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java	Fri Oct 30 00:02:37 2015 +0100
@@ -29,7 +29,7 @@
  *          java.management
  *          jdk.attach
  *          jdk.management/sun.tools.attach
- * @run main/othervm/timeout=600 TestOptionsWithRanges
+ * @run main/othervm/timeout=780 TestOptionsWithRanges
  */
 
 import java.util.ArrayList;
@@ -70,6 +70,8 @@
         allOptionsAsMap.remove("G1ConcRefinementThreads");
         allOptionsAsMap.remove("G1RSetRegionEntries");
         allOptionsAsMap.remove("G1RSetSparseRegionEntries");
+        allOptionsAsMap.remove("G1UpdateBufferSize");
+        allOptionsAsMap.remove("InitialBootClassLoaderMetaspaceSize");
 
         /*
          * Remove parameters controlling the code cache. As these
--- a/hotspot/test/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOptionsUtils.java	Wed Jul 05 20:56:54 2017 +0200
+++ b/hotspot/test/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOptionsUtils.java	Fri Oct 30 00:02:37 2015 +0100
@@ -116,20 +116,6 @@
     }
 
     /**
-     * Add dependency for option depending on it's type. E.g. run the JVM in
-     * compilation mode for compiler options.
-     *
-     * @param option option
-     * @param type type of the option
-     */
-    private static void addTypeDependency(JVMOption option, String type) {
-        if (type.contains("C1") || type.contains("C2")) {
-            /* Run in compiler mode for compiler flags */
-            option.addPrepend("-Xcomp");
-        }
-    }
-
-    /**
      * Parse JVM Options. Get input from "inputReader". Parse using
      * "-XX:+PrintFlagsRanges" output format.
      *
@@ -214,7 +200,6 @@
             token = token.substring(1, token.indexOf("}"));
 
             if (acceptOrigin.test(token)) {
-                addTypeDependency(option, token);
                 addNameDependency(option);
 
                 allOptions.put(name, option);