Merge
author duke
Wed, 05 Jul 2017 19:50:54 +0200
changeset 25581 9867e1efa2f0
parent 25580 83960d9537f6 (current diff)
parent 25511 99f847be8aee (diff)
child 25583 b45c49ed39cc
Merge
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp
hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp
hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.hpp
--- a/.hgtags-top-repo	Fri Jul 18 08:25:58 2014 -0700
+++ b/.hgtags-top-repo	Wed Jul 05 19:50:54 2017 +0200
@@ -265,3 +265,4 @@
 ee4fd72b2ec3d92497f37163352f294aa695c6fb jdk9-b20
 9052803f4d01feda28b3d65f2b64dd457d21c7b6 jdk9-b21
 8e4bdab4c362aadde2d321f968cd503a2f779e2f jdk9-b22
+88567461a2cd9b7fb431fee6440005a694df1f47 jdk9-b23
--- a/common/autoconf/flags.m4	Fri Jul 18 08:25:58 2014 -0700
+++ b/common/autoconf/flags.m4	Wed Jul 05 19:50:54 2017 +0200
@@ -407,11 +407,7 @@
         C_O_FLAG_HI="-O3"
         C_O_FLAG_NORM="-O2"
       fi
-      if test "x$HAS_CFLAG_OPTIMIZE_DEBUG" = "xtrue"; then
-        C_O_FLAG_DEBUG="$CFLAG_OPTIMIZE_DEBUG_FLAG"
-      else
-        C_O_FLAG_DEBUG="-O0"
-      fi
+      C_O_FLAG_DEBUG="-O0"
       C_O_FLAG_NONE="-O0"
     elif test "x$TOOLCHAIN_TYPE" = xclang; then
       if test "x$OPENJDK_TARGET_OS" = xmacosx; then
--- a/common/autoconf/generated-configure.sh	Fri Jul 18 08:25:58 2014 -0700
+++ b/common/autoconf/generated-configure.sh	Wed Jul 05 19:50:54 2017 +0200
@@ -4311,7 +4311,7 @@
 #CUSTOM_AUTOCONF_INCLUDE
 
 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1403557683
+DATE_WHEN_GENERATED=1404942241
 
 ###############################################################################
 #
@@ -42011,11 +42011,7 @@
         C_O_FLAG_HI="-O3"
         C_O_FLAG_NORM="-O2"
       fi
-      if test "x$HAS_CFLAG_OPTIMIZE_DEBUG" = "xtrue"; then
-        C_O_FLAG_DEBUG="$CFLAG_OPTIMIZE_DEBUG_FLAG"
-      else
-        C_O_FLAG_DEBUG="-O0"
-      fi
+      C_O_FLAG_DEBUG="-O0"
       C_O_FLAG_NONE="-O0"
     elif test "x$TOOLCHAIN_TYPE" = xclang; then
       if test "x$OPENJDK_TARGET_OS" = xmacosx; then
--- a/hotspot/.hgtags	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/.hgtags	Wed Jul 05 19:50:54 2017 +0200
@@ -425,3 +425,4 @@
 c1af79d122ec9f715fa29312b5e91763f3a4dfc4 jdk9-b20
 17b4a5e831b398738feedb0afe75245744510153 jdk9-b21
 518d1fcc0799494f013e00e0a94a91b6f212d54f jdk9-b22
+dd472cdacc32e3afc7c5bfa7ef16ea0e0befb7fa jdk9-b23
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegion.java	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegion.java	Wed Jul 05 19:50:54 2017 +0200
@@ -24,23 +24,26 @@
 
 package sun.jvm.hotspot.gc_implementation.g1;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Observable;
 import java.util.Observer;
-
 import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.memory.ContiguousSpace;
+import sun.jvm.hotspot.memory.CompactibleSpace;
+import sun.jvm.hotspot.memory.MemRegion;
 import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.AddressField;
 import sun.jvm.hotspot.types.CIntegerField;
 import sun.jvm.hotspot.types.Type;
 import sun.jvm.hotspot.types.TypeDataBase;
 
 // Mirror class for HeapRegion. Currently we don't actually include
-// any of its fields but only iterate over it (which we get "for free"
-// as HeapRegion ultimately inherits from ContiguousSpace).
+// any of its fields but only iterate over it.
 
-public class HeapRegion extends ContiguousSpace {
+public class HeapRegion extends CompactibleSpace {
     // static int GrainBytes;
     static private CIntegerField grainBytesField;
+    static private AddressField topField;
 
     static {
         VM.registerVMInitializedObserver(new Observer() {
@@ -54,6 +57,8 @@
         Type type = db.lookupType("HeapRegion");
 
         grainBytesField = type.getCIntegerField("GrainBytes");
+        topField = type.getAddressField("_top");
+
     }
 
     static public long grainBytes() {
@@ -63,4 +68,25 @@
     public HeapRegion(Address addr) {
         super(addr);
     }
+
+    public Address top() {
+        return topField.getValue(addr);
+    }
+
+    @Override
+    public List getLiveRegions() {
+        List res = new ArrayList();
+        res.add(new MemRegion(bottom(), top()));
+        return res;
+    }
+
+    @Override
+    public long used() {
+        return top().minus(bottom());
+    }
+
+    @Override
+    public long free() {
+        return end().minus(top());
+    }
 }
--- a/hotspot/make/bsd/makefiles/gcc.make	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/make/bsd/makefiles/gcc.make	Wed Jul 05 19:50:54 2017 +0200
@@ -280,16 +280,7 @@
 
 # optimization control flags (Used by fastdebug and release variants)
 OPT_CFLAGS/NOOPT=-O0
-ifeq ($(USE_CLANG), true)
-  # Clang does not support -Og
-  OPT_CFLAGS/DEBUG=-O0
-else ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
-  # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination)
-  OPT_CFLAGS/DEBUG=-Og
-else
-  # Allow no optimizations.
- OPT_CFLAGS/DEBUG=-O0
-endif
+OPT_CFLAGS/DEBUG=-O0
 OPT_CFLAGS/SIZE=-Os
 OPT_CFLAGS/SPEED=-O3
 
@@ -457,16 +448,8 @@
   CFLAGS += -flimit-debug-info
 endif
 
-ifeq ($(USE_CLANG), true)
-  # Clang does not support -Og
-  DEBUG_CFLAGS=-O0
-else ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
-  # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination)
-  DEBUG_CFLAGS=-Og
-else
-  # Allow no optimizations.
-  DEBUG_CFLAGS=-O0
-endif
+# Allow no optimizations.
+DEBUG_CFLAGS=-O0
 
 # DEBUG_BINARIES uses full -g debug information for all configs
 ifeq ($(DEBUG_BINARIES), true)
--- a/hotspot/make/excludeSrc.make	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/make/excludeSrc.make	Wed Jul 05 19:50:54 2017 +0200
@@ -93,6 +93,7 @@
 	ageTable.cpp							\
 	collectorCounters.cpp						\
 	cSpaceCounters.cpp						\
+	gcId.cpp							\
 	gcPolicyCounters.cpp						\
 	gcStats.cpp							\
 	gcTimer.cpp							\
--- a/hotspot/make/linux/Makefile	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/make/linux/Makefile	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -67,8 +67,12 @@
   endif
 endif
 # C1 is not ported on ppc64, so we cannot build a tiered VM:
-ifeq ($(ARCH),ppc64)
-  FORCE_TIERED=0
+# Notice: after 8046471 ARCH will be 'ppc' for top-level ppc64 builds but
+# 'ppc64' for HotSpot-only ppc64 builds. Need to detect both variants here!
+ifneq (,$(findstring $(ARCH), ppc ppc64))
+  ifeq ($(ARCH_DATA_MODEL), 64)
+    FORCE_TIERED=0
+  endif
 endif
 
 ifdef LP64
--- a/hotspot/make/linux/makefiles/defs.make	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/make/linux/makefiles/defs.make	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -108,7 +108,9 @@
 endif
 
 # PPC
-ifneq (,$(findstring $(ARCH), ppc))
+# Notice: after 8046471 ARCH will be 'ppc' for top-level ppc64 builds but
+# 'ppc64' for HotSpot-only ppc64 builds. Need to detect both variants here!
+ifneq (,$(findstring $(ARCH), ppc ppc64))
   ifeq ($(ARCH_DATA_MODEL), 64)
     MAKE_ARGS        += LP64=1
     PLATFORM         = linux-ppc64
--- a/hotspot/make/linux/makefiles/dtrace.make	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/make/linux/makefiles/dtrace.make	Wed Jul 05 19:50:54 2017 +0200
@@ -40,7 +40,14 @@
 ifneq ($(ALT_SDT_H),)
   SDT_H_FILE = $(ALT_SDT_H)
 else
-  SDT_H_FILE = /usr/include/sys/sdt.h
+  ifeq ($(USE_CLANG), true)
+    # Clang doesn't support the -print-sysroot option and there is no known equivalent
+    # option, so fall back to using / as sysroot
+    SDT_SYSROOT=
+  else
+    SDT_SYSROOT=$(shell $(CXX) -print-sysroot)
+  endif
+  SDT_H_FILE = $(SDT_SYSROOT)/usr/include/sys/sdt.h
 endif
 
 DTRACE_ENABLED = $(shell test -f $(SDT_H_FILE) && echo $(SDT_H_FILE))
--- a/hotspot/make/linux/makefiles/gcc.make	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/make/linux/makefiles/gcc.make	Wed Jul 05 19:50:54 2017 +0200
@@ -231,13 +231,7 @@
 
 # optimization control flags (Used by fastdebug and release variants)
 OPT_CFLAGS/NOOPT=-O0
-ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
-  # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination)
-  OPT_CFLAGS/DEBUG=-Og
-else
-  # Allow no optimizations.
-  OPT_CFLAGS/DEBUG=-O0
-endif
+OPT_CFLAGS/DEBUG=-O0
 OPT_CFLAGS/SIZE=-Os
 OPT_CFLAGS/SPEED=-O3
 
@@ -344,13 +338,8 @@
   CFLAGS += -flimit-debug-info
 endif
 
-ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
-  # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination)
-  DEBUG_CFLAGS=-Og
-else
-  # Allow no optimizations.
-  DEBUG_CFLAGS=-O0
-endif
+# Allow no optimizations.
+DEBUG_CFLAGS=-O0
 
 # DEBUG_BINARIES uses full -g debug information for all configs
 ifeq ($(DEBUG_BINARIES), true)
--- a/hotspot/make/solaris/makefiles/gcc.make	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/make/solaris/makefiles/gcc.make	Wed Jul 05 19:50:54 2017 +0200
@@ -127,13 +127,7 @@
 
 # optimization control flags (Used by fastdebug and release variants)
 OPT_CFLAGS/NOOPT=-O0
-ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
-  # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination)
-  OPT_CFLAGS/DEBUG=-Og
-+else
-  # Allow no optimizations.
-  OPT_CFLAGS/DEBUG=-O0
-endif
+OPT_CFLAGS/DEBUG=-O0
 OPT_CFLAGS/SIZE=-Os
 OPT_CFLAGS/SPEED=-O3
 
@@ -229,14 +223,8 @@
 #------------------------------------------------------------------------
 # Debug flags
 
-ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
-  # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination)
-  DEBUG_CFLAGS=-Og
-else
-  # Allow no optimizations.
-  DEBUG_CFLAGS=-O0
-endif
-
+# Allow no optimizations.
+DEBUG_CFLAGS=-O0
 
 # Use the stabs format for debugging information (this is the default
 # on gcc-2.91). It's good enough, has all the information about line
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -32,12 +32,6 @@
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/defaultStream.hpp"
 #include "vm_version_ppc.hpp"
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
 
 # include <sys/sysinfo.h>
 
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 #include "runtime/biasedLocking.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -28,12 +28,6 @@
 #include "runtime/java.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "vm_version_sparc.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
 
 int VM_Version::_features = VM_Version::unknown_m;
 const char* VM_Version::_features_str = "";
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -33,7 +33,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/monitorChunk.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -29,18 +29,6 @@
 #include "runtime/java.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "vm_version_x86.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 
 int VM_Version::_cpu;
--- a/hotspot/src/cpu/zero/vm/vm_version_zero.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/cpu/zero/vm/vm_version_zero.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -29,11 +29,5 @@
 #include "runtime/java.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "vm_version_zero.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 // This file is intentionally empty
--- a/hotspot/src/os/aix/vm/attachListener_aix.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/aix/vm/attachListener_aix.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -25,7 +25,7 @@
 
 #include "precompiled.hpp"
 #include "runtime/interfaceSupport.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
 #include "services/dtraceAttacher.hpp"
 
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -42,6 +42,7 @@
 #include "memory/filemap.hpp"
 #include "mutex_aix.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "os_aix.inline.hpp"
 #include "os_share_aix.hpp"
 #include "porting_aix.hpp"
 #include "prims/jniFastGetField.hpp"
@@ -2807,12 +2808,10 @@
   return DontYieldALot;
 }
 
-void os::yield() {
+void os::naked_yield() {
   sched_yield();
 }
 
-os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
-
 ////////////////////////////////////////////////////////////////////////////////
 // thread priority support
 
@@ -3069,7 +3068,7 @@
 
   for (int n = 0; !osthread->sr.is_suspended(); n++) {
     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
-      os::yield();
+      os::naked_yield();
     }
 
     // timeout, try to cancel the request
@@ -3103,7 +3102,7 @@
     if (sr_notify(osthread) == 0) {
       for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
         for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
-          os::yield();
+          os::naked_yield();
         }
       }
     } else {
--- a/hotspot/src/os/aix/vm/os_aix.inline.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/aix/vm/os_aix.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,8 +26,6 @@
 #ifndef OS_AIX_VM_OS_AIX_INLINE_HPP
 #define OS_AIX_VM_OS_AIX_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
 // System includes
@@ -45,18 +43,6 @@
   return pthread_getspecific((pthread_key_t)index);
 }
 
-inline const char* os::file_separator() {
-  return "/";
-}
-
-inline const char* os::line_separator() {
-  return "\n";
-}
-
-inline const char* os::path_separator() {
-  return ":";
-}
-
 // File names are case-sensitive on windows only
 inline int os::file_name_strcmp(const char* s1, const char* s2) {
   return strcmp(s1, s2);
--- a/hotspot/src/os/bsd/vm/attachListener_bsd.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/bsd/vm/attachListener_bsd.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
 
 #include "precompiled.hpp"
 #include "runtime/interfaceSupport.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
 #include "services/dtraceAttacher.hpp"
 
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -36,6 +36,7 @@
 #include "memory/filemap.hpp"
 #include "mutex_bsd.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "os_bsd.inline.hpp"
 #include "os_share_bsd.hpp"
 #include "prims/jniFastGetField.hpp"
 #include "prims/jvm.h"
@@ -1171,10 +1172,6 @@
   ::abort();
 }
 
-// unused on bsd for now.
-void os::set_error_file(const char *logfile) {}
-
-
 // This method is a copy of JDK's sysGetLastErrorString
 // from src/solaris/hpi/src/system_md.c
 
@@ -1831,6 +1828,7 @@
         // determine if this is a legacy image or modules image
         // modules image doesn't have "jre" subdirectory
         len = strlen(buf);
+        assert(len < buflen, "Ran out of buffer space");
         jrelib_p = buf + len;
 
         // Add the appropriate library subdir
@@ -1864,7 +1862,7 @@
     }
   }
 
-  strcpy(saved_jvm_path, buf);
+  strncpy(saved_jvm_path, buf, MAXPATHLEN);
 }
 
 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
@@ -2595,12 +2593,10 @@
   return DontYieldALot;
 }
 
-void os::yield() {
+void os::naked_yield() {
   sched_yield();
 }
 
-os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
-
 ////////////////////////////////////////////////////////////////////////////////
 // thread priority support
 
@@ -4217,22 +4213,12 @@
   return abstime;
 }
 
-
-// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
-// Conceptually TryPark() should be equivalent to park(0).
-
-int os::PlatformEvent::TryPark() {
-  for (;;) {
-    const int v = _Event;
-    guarantee((v == 0) || (v == 1), "invariant");
-    if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
-  }
-}
-
 void os::PlatformEvent::park() {       // AKA "down()"
   // Invariant: Only the thread associated with the Event/PlatformEvent
   // may call park().
   // TODO: assert that _Assoc != NULL or _Assoc == Self
+  assert(_nParked == 0, "invariant");
+
   int v;
   for (;;) {
       v = _Event;
@@ -4332,8 +4318,7 @@
   //    1 :=> 1
   //   -1 :=> either 0 or 1; must signal target thread
   //          That is, we can safely transition _Event from -1 to either
-  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
-  //          unpark() calls.
+  //          0 or 1.
   // See also: "Semaphores in Plan 9" by Mullender & Cox
   //
   // Note: Forcing a transition from "-1" to "1" on an unpark() means
@@ -4540,10 +4525,9 @@
 }
 
 void Parker::unpark() {
-  int s, status;
-  status = pthread_mutex_lock(_mutex);
+  int status = pthread_mutex_lock(_mutex);
   assert(status == 0, "invariant");
-  s = _counter;
+  const int s = _counter;
   _counter = 1;
   if (s < 1) {
      if (WorkAroundNPTLTimedWaitHang) {
--- a/hotspot/src/os/bsd/vm/os_bsd.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -219,7 +219,6 @@
     int  fired() { return _Event; }
     void park();
     void unpark();
-    int  TryPark();
     int  park(jlong millis);
     void SetAssociation(Thread * a) { _Assoc = a; }
 };
--- a/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -25,8 +25,6 @@
 #ifndef OS_BSD_VM_OS_BSD_INLINE_HPP
 #define OS_BSD_VM_OS_BSD_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
 // System includes
@@ -40,18 +38,6 @@
   return pthread_getspecific((pthread_key_t)index);
 }
 
-inline const char* os::file_separator() {
-  return "/";
-}
-
-inline const char* os::line_separator() {
-  return "\n";
-}
-
-inline const char* os::path_separator() {
-  return ":";
-}
-
 // File names are case-sensitive on windows only
 inline int os::file_name_strcmp(const char* s1, const char* s2) {
   return strcmp(s1, s2);
--- a/hotspot/src/os/linux/vm/attachListener_linux.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/linux/vm/attachListener_linux.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
 
 #include "precompiled.hpp"
 #include "runtime/interfaceSupport.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
 #include "services/dtraceAttacher.hpp"
 
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -36,6 +36,7 @@
 #include "memory/filemap.hpp"
 #include "mutex_linux.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "os_linux.inline.hpp"
 #include "os_share_linux.hpp"
 #include "prims/jniFastGetField.hpp"
 #include "prims/jvm.h"
@@ -1552,9 +1553,6 @@
   ::abort();
 }
 
-// unused on linux for now.
-void os::set_error_file(const char *logfile) {}
-
 
 // This method is a copy of JDK's sysGetLastErrorString
 // from src/solaris/hpi/src/system_md.c
@@ -2344,6 +2342,7 @@
         // determine if this is a legacy image or modules image
         // modules image doesn't have "jre" subdirectory
         len = strlen(buf);
+        assert(len < buflen, "Ran out of buffer room");
         jrelib_p = buf + len;
         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
         if (0 != access(buf, F_OK)) {
@@ -2364,7 +2363,7 @@
     }
   }
 
-  strcpy(saved_jvm_path, buf);
+  strncpy(saved_jvm_path, buf, MAXPATHLEN);
 }
 
 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
@@ -3790,12 +3789,10 @@
   return DontYieldALot;
 }
 
-void os::yield() {
+void os::naked_yield() {
   sched_yield();
 }
 
-os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
-
 ////////////////////////////////////////////////////////////////////////////////
 // thread priority support
 
@@ -5456,22 +5453,12 @@
   return abstime;
 }
 
-
-// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
-// Conceptually TryPark() should be equivalent to park(0).
-
-int os::PlatformEvent::TryPark() {
-  for (;;) {
-    const int v = _Event;
-    guarantee((v == 0) || (v == 1), "invariant");
-    if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
-  }
-}
-
 void os::PlatformEvent::park() {       // AKA "down()"
   // Invariant: Only the thread associated with the Event/PlatformEvent
   // may call park().
   // TODO: assert that _Assoc != NULL or _Assoc == Self
+  assert(_nParked == 0, "invariant");
+
   int v;
   for (;;) {
       v = _Event;
@@ -5571,8 +5558,7 @@
   //    1 :=> 1
   //   -1 :=> either 0 or 1; must signal target thread
   //          That is, we can safely transition _Event from -1 to either
-  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
-  //          unpark() calls.
+  //          0 or 1.
   // See also: "Semaphores in Plan 9" by Mullender & Cox
   //
   // Note: Forcing a transition from "-1" to "1" on an unpark() means
@@ -5800,10 +5786,9 @@
 }
 
 void Parker::unpark() {
-  int s, status;
-  status = pthread_mutex_lock(_mutex);
+  int status = pthread_mutex_lock(_mutex);
   assert(status == 0, "invariant");
-  s = _counter;
+  const int s = _counter;
   _counter = 1;
   if (s < 1) {
     // thread might be parked
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -315,7 +315,6 @@
     int  fired() { return _Event; }
     void park();
     void unpark();
-    int  TryPark();
     int  park(jlong millis); // relative timed-wait only
     void SetAssociation(Thread * a) { _Assoc = a; }
 };
--- a/hotspot/src/os/linux/vm/os_linux.inline.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -25,8 +25,6 @@
 #ifndef OS_LINUX_VM_OS_LINUX_INLINE_HPP
 #define OS_LINUX_VM_OS_LINUX_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
 // System includes
@@ -40,18 +38,6 @@
   return pthread_getspecific((pthread_key_t)index);
 }
 
-inline const char* os::file_separator() {
-  return "/";
-}
-
-inline const char* os::line_separator() {
-  return "\n";
-}
-
-inline const char* os::path_separator() {
-  return ":";
-}
-
 // File names are case-sensitive on windows only
 inline int os::file_name_strcmp(const char* s1, const char* s2) {
   return strcmp(s1, s2);
--- a/hotspot/src/os/posix/vm/os_posix.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/posix/vm/os_posix.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,8 +22,16 @@
  *
  */
 
+#include "runtime/os.hpp"
+
 #ifndef OS_POSIX_VM_OS_POSIX_HPP
 #define OS_POSIX_VM_OS_POSIX_HPP
+
+// File conventions
+static const char* file_separator() { return "/"; }
+static const char* line_separator() { return "\n"; }
+static const char* path_separator() { return ":"; }
+
 class Posix {
   friend class os;
 
--- a/hotspot/src/os/solaris/vm/attachListener_solaris.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/solaris/vm/attachListener_solaris.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
 
 #include "precompiled.hpp"
 #include "runtime/interfaceSupport.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
 #include "services/dtraceAttacher.hpp"
 
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -37,6 +37,7 @@
 #include "mutex_solaris.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "os_share_solaris.hpp"
+#include "os_solaris.inline.hpp"
 #include "prims/jniFastGetField.hpp"
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
@@ -1542,9 +1543,6 @@
   ::abort(); // dump core (for debugging)
 }
 
-// unused
-void os::set_error_file(const char *logfile) {}
-
 // DLL functions
 
 const char* os::dll_file_extension() { return ".so"; }
@@ -2184,6 +2182,7 @@
         // determine if this is a legacy image or modules image
         // modules image doesn't have "jre" subdirectory
         len = strlen(buf);
+        assert(len < buflen, "Ran out of buffer space");
         jrelib_p = buf + len;
         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
         if (0 != access(buf, F_OK)) {
@@ -2202,7 +2201,7 @@
     }
   }
 
-  strcpy(saved_jvm_path, buf);
+  strncpy(saved_jvm_path, buf, MAXPATHLEN);
 }
 
 
@@ -3172,20 +3171,14 @@
   }
 }
 
-// Caveat: Solaris os::yield() causes a thread-state transition whereas
-// the linux and win32 implementations do not.  This should be checked.
-
-void os::yield() {
-  // Yields to all threads with same or greater priority
-  os::sleep(Thread::current(), 0, false);
-}
-
 // Note that yield semantics are defined by the scheduling class to which
 // the thread currently belongs.  Typically, yield will _not yield to
 // other equal or higher priority threads that reside on the dispatch queues
 // of other CPUs.
 
-os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
+void os::naked_yield() {
+  thr_yield();
+}
 
 // Interface for setting lwp priorities.  If we are using T2 libthread,
 // which forces the use of BoundThreads or we manually set UseBoundThreads,
@@ -5439,20 +5432,11 @@
   return abstime;
 }
 
-// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
-// Conceptually TryPark() should be equivalent to park(0).
-
-int os::PlatformEvent::TryPark() {
-  for (;;) {
-    const int v = _Event;
-    guarantee((v == 0) || (v == 1), "invariant");
-    if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
-  }
-}
-
 void os::PlatformEvent::park() {           // AKA: down()
   // Invariant: Only the thread associated with the Event/PlatformEvent
   // may call park().
+  assert(_nParked == 0, "invariant");
+
   int v;
   for (;;) {
       v = _Event;
@@ -5539,8 +5523,7 @@
   //    1 :=> 1
   //   -1 :=> either 0 or 1; must signal target thread
   //          That is, we can safely transition _Event from -1 to either
-  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
-  //          unpark() calls.
+  //          0 or 1.
   // See also: "Semaphores in Plan 9" by Mullender & Cox
   //
   // Note: Forcing a transition from "-1" to "1" on an unpark() means
@@ -5744,10 +5727,9 @@
 }
 
 void Parker::unpark() {
-  int s, status;
-  status = os::Solaris::mutex_lock(_mutex);
+  int status = os::Solaris::mutex_lock(_mutex);
   assert(status == 0, "invariant");
-  s = _counter;
+  const int s = _counter;
   _counter = 1;
   status = os::Solaris::mutex_unlock(_mutex);
   assert(status == 0, "invariant");
--- a/hotspot/src/os/solaris/vm/os_solaris.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -332,7 +332,6 @@
     int  fired() { return _Event; }
     void park();
     int  park(jlong millis);
-    int  TryPark();
     void unpark();
 };
 
--- a/hotspot/src/os/solaris/vm/os_solaris.inline.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -25,8 +25,6 @@
 #ifndef OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
 #define OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
 // System includes
@@ -39,10 +37,6 @@
 #include <netdb.h>
 #include <setjmp.h>
 
-inline const char* os::file_separator() { return "/"; }
-inline const char* os::line_separator() { return "\n"; }
-inline const char* os::path_separator() { return ":"; }
-
 // File names are case-sensitive on windows only
 inline int os::file_name_strcmp(const char* s1, const char* s2) {
   return strcmp(s1, s2);
--- a/hotspot/src/os/windows/vm/decoder_windows.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/windows/vm/decoder_windows.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "prims/jvm.h"
 #include "runtime/arguments.hpp"
+#include "runtime/os.hpp"
 #include "decoder_windows.hpp"
 
 WindowsDecoder::WindowsDecoder() {
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -40,6 +40,7 @@
 #include "mutex_windows.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "os_share_windows.hpp"
+#include "os_windows.inline.hpp"
 #include "prims/jniFastGetField.hpp"
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
@@ -1823,7 +1824,9 @@
     // looks like jvm.dll is installed there (append a fake suffix
     // hotspot/jvm.dll).
     char* java_home_var = ::getenv("JAVA_HOME");
-    if (java_home_var != NULL && java_home_var[0] != 0) {
+    if (java_home_var != NULL && java_home_var[0] != 0 &&
+        strlen(java_home_var) < (size_t)buflen) {
+
       strncpy(buf, java_home_var, buflen);
 
       // determine if this is a legacy image or modules image
@@ -1842,7 +1845,7 @@
   if (buf[0] == '\0') {
     GetModuleFileName(vm_lib_handle, buf, buflen);
   }
-  strcpy(saved_jvm_path, buf);
+  strncpy(saved_jvm_path, buf, MAX_PATH);
 }
 
 
@@ -2290,17 +2293,6 @@
   return EXCEPTION_CONTINUE_SEARCH;
 }
 
-// Fatal error reporting is single threaded so we can make this a
-// static and preallocated.  If it's more than MAX_PATH silently ignore
-// it.
-static char saved_error_file[MAX_PATH] = {0};
-
-void os::set_error_file(const char *logfile) {
-  if (strlen(logfile) <= MAX_PATH) {
-    strncpy(saved_error_file, logfile, MAX_PATH);
-  }
-}
-
 static inline void report_error(Thread* t, DWORD exception_code,
                                 address addr, void* siginfo, void* context) {
   VMError err(t, exception_code, addr, siginfo, context);
@@ -3514,18 +3506,15 @@
 
 typedef BOOL (WINAPI * STTSignature)(void);
 
-os::YieldResult os::NakedYield() {
+void os::naked_yield() {
   // Use either SwitchToThread() or Sleep(0)
   // Consider passing back the return value from SwitchToThread().
   if (os::Kernel32Dll::SwitchToThreadAvailable()) {
-    return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY;
+    SwitchToThread();
   } else {
     Sleep(0);
   }
-  return os::YIELD_UNKNOWN;
-}
-
-void os::yield() {  os::NakedYield(); }
+}
 
 // Win32 only gives you access to seven real priorities at a time,
 // so we compress Java's ten down to seven.  It would be better
@@ -4875,8 +4864,7 @@
   //    1 :=> 1
   //   -1 :=> either 0 or 1; must signal target thread
   //          That is, we can safely transition _Event from -1 to either
-  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
-  //          unpark() calls.
+  //          0 or 1.
   // See also: "Semaphores in Plan 9" by Mullender & Cox
   //
   // Note: Forcing a transition from "-1" to "1" on an unpark() means
--- a/hotspot/src/os/windows/vm/os_windows.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -29,6 +29,11 @@
 // Information about the protection of the page at address '0' on this os.
 static bool zero_page_read_protected() { return true; }
 
+// File conventions
+static const char* file_separator() { return "\\"; }
+static const char* line_separator() { return "\r\n"; }
+static const char* path_separator() { return ";"; }
+
 class win32 {
   friend class os;
 
--- a/hotspot/src/os/windows/vm/os_windows.inline.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -25,13 +25,8 @@
 #ifndef OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
 #define OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
-inline const char* os::file_separator()                { return "\\"; }
-inline const char* os::line_separator()                { return "\r\n"; }
-inline const char* os::path_separator()                { return ";"; }
 inline const char* os::dll_file_extension()            { return ".dll"; }
 
 inline const int os::default_file_open_flags() { return O_BINARY | O_NOINHERIT;}
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1997,7 +1997,13 @@
   if (!UseInlineCaches && is_loaded && code == Bytecodes::_invokevirtual
       && !target->can_be_statically_bound()) {
     // Find a vtable index if one is available
-    vtable_index = target->resolve_vtable_index(calling_klass, callee_holder);
+    // For arrays, callee_holder is Object. Resolving the call with
+    // Object would allow an illegal call to finalize() on an
+    // array. We use holder instead: illegal calls to finalize() won't
+    // be compiled as vtable calls (IC call resolution will catch the
+    // illegal call) and the few legal calls on array types won't be
+    // either.
+    vtable_index = target->resolve_vtable_index(calling_klass, holder);
   }
 #endif
 
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1050,6 +1050,7 @@
               n_copy->set_data((intx) (load_klass()));
             } else {
               assert(mirror() != NULL, "klass not set");
+              // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
               n_copy->set_data(cast_from_oop<intx>(mirror()));
             }
 
--- a/hotspot/src/share/vm/ci/ciEnv.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -185,6 +185,10 @@
     }
   }
 
+  void ensure_metadata_alive(ciMetadata* m) {
+    _factory->ensure_metadata_alive(m);
+  }
+
   ciInstance* get_instance(oop o) {
     if (o == NULL) return NULL;
     return get_object(o)->as_instance();
--- a/hotspot/src/share/vm/ci/ciKlass.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciKlass.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -43,6 +43,7 @@
   friend class ciMethod;
   friend class ciMethodData;
   friend class ciObjArrayKlass;
+  friend class ciReceiverTypeData;
 
 private:
   ciSymbol* _name;
--- a/hotspot/src/share/vm/ci/ciMethodData.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodData.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -170,6 +170,7 @@
     Klass* k = data->as_ReceiverTypeData()->receiver(row);
     if (k != NULL) {
       ciKlass* klass = CURRENT_ENV->get_klass(k);
+      CURRENT_ENV->ensure_metadata_alive(klass);
       set_receiver(row, klass);
     }
   }
@@ -191,6 +192,7 @@
 void ciSpeculativeTrapData::translate_from(const ProfileData* data) {
   Method* m = data->as_SpeculativeTrapData()->method();
   ciMethod* ci_m = CURRENT_ENV->get_method(m);
+  CURRENT_ENV->ensure_metadata_alive(ci_m);
   set_method(ci_m);
 }
 
--- a/hotspot/src/share/vm/ci/ciMethodData.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodData.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -70,6 +70,7 @@
     Klass* v = TypeEntries::valid_klass(k);
     if (v != NULL) {
       ciKlass* klass = CURRENT_ENV->get_klass(v);
+      CURRENT_ENV->ensure_metadata_alive(klass);
       return with_status(klass, k);
     }
     return with_status(NULL, k);
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -46,6 +46,9 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
 #include "runtime/fieldType.hpp"
+#if INCLUDE_ALL_GCS
+# include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif
 
 // ciObjectFactory
 //
@@ -374,6 +377,37 @@
   return NULL;
 }
 
+// ------------------------------------------------------------------
+// ciObjectFactory::ensure_metadata_alive
+//
+// Ensure that the metadata wrapped by the ciMetadata is kept alive by GC.
+// This is primarily useful for metadata which is considered as weak roots
+// by the GC but need to be strong roots if reachable from a current compilation.
+//
+void ciObjectFactory::ensure_metadata_alive(ciMetadata* m) {
+  ASSERT_IN_VM; // We're handling raw oops here.
+
+#if INCLUDE_ALL_GCS
+  if (!UseG1GC) {
+    return;
+  }
+  Klass* metadata_owner_klass;
+  if (m->is_klass()) {
+    metadata_owner_klass = m->as_klass()->get_Klass();
+  } else if (m->is_method()) {
+    metadata_owner_klass = m->as_method()->get_Method()->constants()->pool_holder();
+  } else {
+    fatal("Not implemented for other types of metadata");
+  }
+
+  oop metadata_holder = metadata_owner_klass->klass_holder();
+  if (metadata_holder != NULL) {
+    G1SATBCardTableModRefBS::enqueue(metadata_holder);
+  }
+
+#endif
+}
+
 //------------------------------------------------------------------
 // ciObjectFactory::get_unloaded_method
 //
--- a/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -75,6 +75,8 @@
   ciObject* create_new_object(oop o);
   ciMetadata* create_new_object(Metadata* o);
 
+  void ensure_metadata_alive(ciMetadata* m);
+
   static bool is_equal(NonPermObject* p, oop key) {
     return p->object()->get_oop() == key;
   }
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -919,7 +919,7 @@
             "Wrong size %u for field's Signature attribute in class file %s",
             attribute_length, CHECK);
         }
-        generic_signature_index = cfs->get_u2(CHECK);
+        generic_signature_index = parse_generic_signature_attribute(CHECK);
       } else if (attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
         if (runtime_visible_annotations != NULL) {
           classfile_parse_error(
@@ -2306,8 +2306,7 @@
             "Invalid Signature attribute length %u in class file %s",
             method_attribute_length, CHECK_(nullHandle));
         }
-        cfs->guarantee_more(2, CHECK_(nullHandle));  // generic_signature_index
-        generic_signature_index = cfs->get_u2_fast();
+        generic_signature_index = parse_generic_signature_attribute(CHECK_(nullHandle));
       } else if (method_attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
         if (runtime_visible_annotations != NULL) {
           classfile_parse_error(
@@ -2644,6 +2643,17 @@
   return method_ordering;
 }
 
+// Parse generic_signature attribute for methods and fields
+u2 ClassFileParser::parse_generic_signature_attribute(TRAPS) {
+  ClassFileStream* cfs = stream();
+  cfs->guarantee_more(2, CHECK_0);  // generic_signature_index
+  u2 generic_signature_index = cfs->get_u2_fast();
+  check_property(
+    valid_symbol_at(generic_signature_index),
+    "Invalid Signature attribute at constant pool index %u in class file %s",
+    generic_signature_index, CHECK_0);
+  return generic_signature_index;
+}
 
 void ClassFileParser::parse_classfile_sourcefile_attribute(TRAPS) {
   ClassFileStream* cfs = stream();
@@ -2798,17 +2808,19 @@
   ClassFileStream* cfs = stream();
   u1* current_start = cfs->current();
 
-  cfs->guarantee_more(2, CHECK);  // length
+  guarantee_property(attribute_byte_length >= sizeof(u2),
+                     "Invalid BootstrapMethods attribute length %u in class file %s",
+                     attribute_byte_length,
+                     CHECK);
+
+  cfs->guarantee_more(attribute_byte_length, CHECK);
+
   int attribute_array_length = cfs->get_u2_fast();
 
   guarantee_property(_max_bootstrap_specifier_index < attribute_array_length,
                      "Short length on BootstrapMethods in class file %s",
                      CHECK);
 
-  guarantee_property(attribute_byte_length >= sizeof(u2),
-                     "Invalid BootstrapMethods attribute length %u in class file %s",
-                     attribute_byte_length,
-                     CHECK);
 
   // The attribute contains a counted array of counted tuples of shorts,
   // represending bootstrap specifiers:
@@ -4590,8 +4602,9 @@
             Exceptions::fthrow(
               THREAD_AND_LOCATION,
               vmSymbols::java_lang_VerifyError(),
-              "class %s overrides final method %s.%s",
+              "class %s overrides final method %s.%s%s",
               this_klass->external_name(),
+              super_m->method_holder()->external_name(),
               name->as_C_string(),
               signature->as_C_string()
             );
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -266,6 +266,7 @@
   u1* parse_stackmap_table(u4 code_attribute_length, TRAPS);
 
   // Classfile attribute parsing
+  u2 parse_generic_signature_attribute(TRAPS);
   void parse_classfile_sourcefile_attribute(TRAPS);
   void parse_classfile_source_debug_extension_attribute(int length, TRAPS);
   u2   parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -52,6 +52,7 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/os.hpp"
 #include "runtime/threadCritical.hpp"
 #include "runtime/timer.hpp"
 #include "services/management.hpp"
@@ -59,22 +60,6 @@
 #include "utilities/events.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/hashtable.inline.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
-
 
 // Entry points in zip.dll for loading zip/jar file entries
 
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -332,6 +332,27 @@
   }
 }
 
+#ifdef ASSERT
+class AllAliveClosure : public OopClosure {
+  BoolObjectClosure* _is_alive_closure;
+  bool _found_dead;
+ public:
+  AllAliveClosure(BoolObjectClosure* is_alive_closure) : _is_alive_closure(is_alive_closure), _found_dead(false) {}
+  template <typename T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      if (!_is_alive_closure->do_object_b(obj)) {
+        _found_dead = true;
+      }
+    }
+  }
+  void do_oop(oop* p)       { do_oop_work<oop>(p); }
+  void do_oop(narrowOop* p) { do_oop_work<narrowOop>(p); }
+  bool found_dead()         { return _found_dead; }
+};
+#endif
+
 oop ClassLoaderData::keep_alive_object() const {
   assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
   return is_anonymous() ? _klasses->java_mirror() : class_loader();
@@ -341,7 +362,15 @@
   bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
       || is_alive_closure->do_object_b(keep_alive_object());
 
-  assert(!alive || claimed(), "must be claimed");
+#ifdef ASSERT
+  if (alive) {
+    AllAliveClosure all_alive_closure(is_alive_closure);
+    KlassToOopClosure klass_closure(&all_alive_closure);
+    const_cast<ClassLoaderData*>(this)->oops_do(&all_alive_closure, &klass_closure, false);
+    assert(!all_alive_closure.found_dead(), err_msg("Found dead oop in alive cld: " PTR_FORMAT, p2i(this)));
+  }
+#endif
+
   return alive;
 }
 
@@ -620,9 +649,9 @@
 
 void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
   if (ClassUnloading) {
-    ClassLoaderDataGraph::keep_alive_oops_do(f, klass_closure, must_claim);
+    keep_alive_oops_do(f, klass_closure, must_claim);
   } else {
-    ClassLoaderDataGraph::oops_do(f, klass_closure, must_claim);
+    oops_do(f, klass_closure, must_claim);
   }
 }
 
@@ -632,6 +661,27 @@
   }
 }
 
+void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
+  for (ClassLoaderData* cld = _head;  cld != NULL; cld = cld->_next) {
+    CLDClosure* closure = cld->keep_alive() ? strong : weak;
+    if (closure != NULL) {
+      closure->do_cld(cld);
+    }
+  }
+}
+
+void ClassLoaderDataGraph::keep_alive_cld_do(CLDClosure* cl) {
+  roots_cld_do(cl, NULL);
+}
+
+void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
+  if (ClassUnloading) {
+    keep_alive_cld_do(cl);
+  } else {
+    cld_do(cl);
+  }
+}
+
 void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
   for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
     cld->classes_do(klass_closure);
@@ -689,6 +739,16 @@
   return array;
 }
 
+bool ClassLoaderDataGraph::unload_list_contains(const void* x) {
+  assert(SafepointSynchronize::is_at_safepoint(), "only safe to call at safepoint");
+  for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
+    if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) {
+      return true;
+    }
+  }
+  return false;
+}
+
 #ifndef PRODUCT
 bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
   for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
@@ -809,6 +869,60 @@
   return _rw_metaspace;
 }
 
+ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic()
+    : _next_klass(NULL) {
+  ClassLoaderData* cld = ClassLoaderDataGraph::_head;
+  Klass* klass = NULL;
+
+  // Find the first klass in the CLDG.
+  while (cld != NULL) {
+    klass = cld->_klasses;
+    if (klass != NULL) {
+      _next_klass = klass;
+      return;
+    }
+    cld = cld->next();
+  }
+}
+
+Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass) {
+  Klass* next = klass->next_link();
+  if (next != NULL) {
+    return next;
+  }
+
+  // No more klasses in the current CLD. Time to find a new CLD.
+  ClassLoaderData* cld = klass->class_loader_data();
+  while (next == NULL) {
+    cld = cld->next();
+    if (cld == NULL) {
+      break;
+    }
+    next = cld->_klasses;
+  }
+
+  return next;
+}
+
+Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
+  Klass* head = (Klass*)_next_klass;
+
+  while (head != NULL) {
+    Klass* next = next_klass_in_cldg(head);
+
+    Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head);
+
+    if (old_head == head) {
+      return head; // Won the CAS.
+    }
+
+    head = old_head;
+  }
+
+  // Nothing more for the iterator to hand out.
+  assert(head == NULL, err_msg("head is " PTR_FORMAT ", expected not null:", p2i(head)));
+  return NULL;
+}
 
 ClassLoaderDataGraphMetaspaceIterator::ClassLoaderDataGraphMetaspaceIterator() {
   _data = ClassLoaderDataGraph::_head;
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -31,7 +31,6 @@
 #include "memory/metaspaceCounters.hpp"
 #include "runtime/mutex.hpp"
 #include "utilities/growableArray.hpp"
-
 #if INCLUDE_TRACE
 # include "utilities/ticks.hpp"
 #endif
@@ -59,6 +58,7 @@
 class ClassLoaderDataGraph : public AllStatic {
   friend class ClassLoaderData;
   friend class ClassLoaderDataGraphMetaspaceIterator;
+  friend class ClassLoaderDataGraphKlassIteratorAtomic;
   friend class VMStructs;
  private:
   // All CLDs (except the null CLD) can be reached by walking _head->_next->...
@@ -75,10 +75,16 @@
   static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
   static void purge();
   static void clear_claimed_marks();
+  // oops do
   static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
+  static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
   static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
-  static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
+  // cld do
   static void cld_do(CLDClosure* cl);
+  static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
+  static void keep_alive_cld_do(CLDClosure* cl);
+  static void always_strong_cld_do(CLDClosure* cl);
+  // klass do
   static void classes_do(KlassClosure* klass_closure);
   static void classes_do(void f(Klass* const));
   static void methods_do(void f(Method*));
@@ -104,6 +110,7 @@
   static void dump() { dump_on(tty); }
   static void verify();
 
+  static bool unload_list_contains(const void* x);
 #ifndef PRODUCT
   static bool contains_loader_data(ClassLoaderData* loader_data);
 #endif
@@ -136,6 +143,7 @@
   };
 
   friend class ClassLoaderDataGraph;
+  friend class ClassLoaderDataGraphKlassIteratorAtomic;
   friend class ClassLoaderDataGraphMetaspaceIterator;
   friend class MetaDataFactory;
   friend class Method;
@@ -195,7 +203,6 @@
 
   void unload();
   bool keep_alive() const       { return _keep_alive; }
-  bool is_alive(BoolObjectClosure* is_alive_closure) const;
   void classes_do(void f(Klass*));
   void loaded_classes_do(KlassClosure* klass_closure);
   void classes_do(void f(InstanceKlass*));
@@ -208,6 +215,9 @@
   MetaWord* allocate(size_t size);
 
  public:
+
+  bool is_alive(BoolObjectClosure* is_alive_closure) const;
+
   // Accessors
   Metaspace* metaspace_or_null() const     { return _metaspace; }
 
@@ -293,6 +303,16 @@
   void initialize_shared_metaspaces();
 };
 
+// An iterator that distributes Klasses to parallel worker threads.
+class ClassLoaderDataGraphKlassIteratorAtomic : public StackObj {
+  volatile Klass* _next_klass;
+ public:
+  ClassLoaderDataGraphKlassIteratorAtomic();
+  Klass* next_klass();
+ private:
+  static Klass* next_klass_in_cldg(Klass* klass);
+};
+
 class ClassLoaderDataGraphMetaspaceIterator : public StackObj {
   ClassLoaderData* _data;
  public:
--- a/hotspot/src/share/vm/classfile/dictionary.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -199,6 +199,26 @@
   return class_was_unloaded;
 }
 
+void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
+  // Skip the strong roots probe marking if the closures are the same.
+  if (strong == weak) {
+    oops_do(strong);
+    return;
+  }
+
+  for (int index = 0; index < table_size(); index++) {
+    for (DictionaryEntry *probe = bucket(index);
+                          probe != NULL;
+                          probe = probe->next()) {
+      Klass* e = probe->klass();
+      ClassLoaderData* loader_data = probe->loader_data();
+      if (is_strongly_reachable(loader_data, e)) {
+        probe->set_strongly_reachable();
+      }
+    }
+  }
+  _pd_cache_table->roots_oops_do(strong, weak);
+}
 
 void Dictionary::always_strong_oops_do(OopClosure* blk) {
   // Follow all system classes and temporary placeholders in dictionary; only
@@ -490,6 +510,23 @@
   }
 }
 
+void ProtectionDomainCacheTable::roots_oops_do(OopClosure* strong, OopClosure* weak) {
+  for (int index = 0; index < table_size(); index++) {
+    for (ProtectionDomainCacheEntry* probe = bucket(index);
+                                     probe != NULL;
+                                     probe = probe->next()) {
+      if (probe->is_strongly_reachable()) {
+        probe->reset_strongly_reachable();
+        probe->oops_do(strong);
+      } else {
+        if (weak != NULL) {
+          probe->oops_do(weak);
+        }
+      }
+    }
+  }
+}
+
 uint ProtectionDomainCacheTable::bucket_size() {
   return sizeof(ProtectionDomainCacheEntry);
 }
--- a/hotspot/src/share/vm/classfile/dictionary.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/dictionary.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -89,6 +89,7 @@
   // GC support
   void oops_do(OopClosure* f);
   void always_strong_oops_do(OopClosure* blk);
+  void roots_oops_do(OopClosure* strong, OopClosure* weak);
 
   void always_strong_classes_do(KlassClosure* closure);
 
@@ -218,6 +219,7 @@
   // GC support
   void oops_do(OopClosure* f);
   void always_strong_oops_do(OopClosure* f);
+  void roots_oops_do(OopClosure* strong, OopClosure* weak);
 
   static uint bucket_size();
 
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -618,6 +618,8 @@
       assert(comp_mirror.not_null(), "must have a mirror");
 
       // Two-way link between the array klass and its component mirror:
+      // (array_klass) k -> mirror -> component_mirror -> array_klass -> k
+      set_component_mirror(mirror(), comp_mirror());
       ArrayKlass::cast(k())->set_component_mirror(comp_mirror());
       set_array_klass(comp_mirror(), k());
     } else {
@@ -679,6 +681,16 @@
   java_class->obj_field_put(_protection_domain_offset, pd);
 }
 
+void java_lang_Class::set_component_mirror(oop java_class, oop comp_mirror) {
+  if (_component_mirror_offset != 0) {
+    java_class->obj_field_put(_component_mirror_offset, comp_mirror);
+  }
+}
+oop java_lang_Class::component_mirror(oop java_class) {
+  assert(_component_mirror_offset != 0, "must be set");
+  return java_class->obj_field(_component_mirror_offset);
+}
+
 oop java_lang_Class::init_lock(oop java_class) {
   assert(_init_lock_offset != 0, "must be set");
   return java_class->obj_field(_init_lock_offset);
@@ -875,6 +887,10 @@
                  klass_oop, vmSymbols::classLoader_name(),
                  vmSymbols::classloader_signature());
 
+  compute_optional_offset(_component_mirror_offset,
+                 klass_oop, vmSymbols::componentType_name(),
+                 vmSymbols::class_signature());
+
   CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
 }
 
@@ -3097,6 +3113,7 @@
 int java_lang_Class::_static_oop_field_count_offset;
 int java_lang_Class::_class_loader_offset;
 int java_lang_Class::_protection_domain_offset;
+int java_lang_Class::_component_mirror_offset;
 int java_lang_Class::_init_lock_offset;
 int java_lang_Class::_signers_offset;
 GrowableArray<Klass*>* java_lang_Class::_fixup_mirror_list = NULL;
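A short sketch of the invariant the two-way link above establishes for an array klass k; java_mirror() and array_klass() are assumed from the existing Klass and java_lang_Class interfaces:

  // For an array klass k whose mirror is m:
  //   oop m  = k->java_mirror();
  //   oop cm = java_lang_Class::component_mirror(m);   // m.componentType
  //   java_lang_Class::array_klass(cm) == k            // and back to k again
  // so the array klass and its component type's mirror can always reach
  // each other.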
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -241,6 +241,7 @@
   static int _init_lock_offset;
   static int _signers_offset;
   static int _class_loader_offset;
+  static int _component_mirror_offset;
 
   static bool offsets_computed;
   static int classRedefinedCount_offset;
@@ -250,6 +251,7 @@
   static void set_init_lock(oop java_class, oop init_lock);
   static void set_protection_domain(oop java_class, oop protection_domain);
   static void set_class_loader(oop java_class, oop class_loader);
+  static void set_component_mirror(oop java_class, oop comp_mirror);
   static void initialize_mirror_fields(KlassHandle k, Handle mirror, Handle protection_domain, TRAPS);
  public:
   static void compute_offsets();
@@ -291,6 +293,7 @@
   // Support for embedded per-class oops
   static oop  protection_domain(oop java_class);
   static oop  init_lock(oop java_class);
+  static oop  component_mirror(oop java_class);
   static objArrayOop  signers(oop java_class);
   static void set_signers(oop java_class, objArrayOop signers);
 
--- a/hotspot/src/share/vm/classfile/metadataOnStackMark.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/metadataOnStackMark.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -47,8 +47,11 @@
   if (_marked_objects == NULL) {
     _marked_objects = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(1000, true);
   }
+
   Threads::metadata_do(Metadata::mark_on_stack);
-  CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
+  if (JvmtiExport::has_redefined_a_class()) {
+    CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
+  }
   CompileBroker::mark_on_stack();
   JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
   ThreadService::metadata_do(Metadata::mark_on_stack);
--- a/hotspot/src/share/vm/classfile/stackMapTable.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/stackMapTable.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -134,6 +134,7 @@
   }
   // check if uninitialized objects exist on backward branches
   check_new_object(frame, target, CHECK_VERIFY(frame->verifier()));
+  frame->verifier()->update_furthest_jump(target);
 }
 
 void StackMapTable::check_new_object(
--- a/hotspot/src/share/vm/classfile/stringTable.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/stringTable.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -37,6 +37,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "utilities/hashtable.inline.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #endif
 
@@ -157,11 +158,26 @@
   return lookup(chars, length);
 }
 
+// Tell the GC that this string was looked up in the StringTable.
+static void ensure_string_alive(oop string) {
+  // A lookup in the StringTable could return an object that was previously
+  // considered dead. The SATB part of G1 needs to get notified about this
+  // potential resurrection, otherwise the marking might not find the object.
+#if INCLUDE_ALL_GCS
+  if (UseG1GC && string != NULL) {
+    G1SATBCardTableModRefBS::enqueue(string);
+  }
+#endif
+}
 
 oop StringTable::lookup(jchar* name, int len) {
   unsigned int hash = hash_string(name, len);
   int index = the_table()->hash_to_index(hash);
-  return the_table()->lookup(index, name, len, hash);
+  oop string = the_table()->lookup(index, name, len, hash);
+
+  ensure_string_alive(string);
+
+  return string;
 }
 
 
@@ -172,7 +188,10 @@
   oop found_string = the_table()->lookup(index, name, len, hashValue);
 
   // Found
-  if (found_string != NULL) return found_string;
+  if (found_string != NULL) {
+    ensure_string_alive(found_string);
+    return found_string;
+  }
 
   debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
   assert(!Universe::heap()->is_in_reserved(name),
@@ -197,11 +216,17 @@
 
   // Grab the StringTable_lock before getting the_table() because it could
   // change at safepoint.
-  MutexLocker ml(StringTable_lock, THREAD);
+  oop added_or_found;
+  {
+    MutexLocker ml(StringTable_lock, THREAD);
+    // Otherwise, add the string to the table
+    added_or_found = the_table()->basic_add(index, string, name, len,
+                                  hashValue, CHECK_NULL);
+  }
 
-  // Otherwise, add to symbol to table
-  return the_table()->basic_add(index, string, name, len,
-                                hashValue, CHECK_NULL);
+  ensure_string_alive(added_or_found);
+
+  return added_or_found;
 }
 
 oop StringTable::intern(Symbol* symbol, TRAPS) {
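The pattern above matters because a concurrent G1 marking cycle may already have decided the interned String is unreachable; handing it out without the enqueue could resurrect an object the marker never revisits. A minimal sketch of the rule the three changed call sites follow (the wrapper name is illustrative):

  // Any oop handed out of the weakly-scanned table is announced to the SATB
  // queue before it escapes to the caller.
  static oop example_lookup(int index, jchar* name, int len, unsigned int hash) {
    oop s = the_table()->lookup(index, name, len, hash);  // may look dead to the marker
    ensure_string_alive(s);  // no-op for NULL and for GCs other than G1
    return s;
  }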
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1612,13 +1612,7 @@
 // system dictionary and follows the remaining classes' contents.
 
 void SystemDictionary::always_strong_oops_do(OopClosure* blk) {
-  blk->do_oop(&_java_system_loader);
-  blk->do_oop(&_system_loader_lock_obj);
-
-  dictionary()->always_strong_oops_do(blk);
-
-  // Visit extra methods
-  invoke_method_table()->oops_do(blk);
+  roots_oops_do(blk, NULL);
 }
 
 void SystemDictionary::always_strong_classes_do(KlassClosure* closure) {
@@ -1685,6 +1679,17 @@
   return unloading_occurred;
 }
 
+void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
+  strong->do_oop(&_java_system_loader);
+  strong->do_oop(&_system_loader_lock_obj);
+
+  // Adjust dictionary
+  dictionary()->roots_oops_do(strong, weak);
+
+  // Visit extra methods
+  invoke_method_table()->oops_do(strong);
+}
+
 void SystemDictionary::oops_do(OopClosure* f) {
   f->do_oop(&_java_system_loader);
   f->do_oop(&_system_loader_lock_obj);
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -330,6 +330,7 @@
 
   // Applies "f->do_oop" to all root oops in the system dictionary.
   static void oops_do(OopClosure* f);
+  static void roots_oops_do(OopClosure* strong, OopClosure* weak);
 
   // System loader lock
   static oop system_loader_lock()           { return _system_loader_lock_obj; }
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -633,6 +633,9 @@
   bool no_control_flow = false; // Set to true when there is no direct control
                                 // flow from current instruction to the next
                                 // instruction in sequence
+
+  set_furthest_jump(0);
+
   Bytecodes::Code opcode;
   while (!bcs.is_last_bytecode()) {
     // Check for recursive re-verification before each bytecode.
@@ -2248,6 +2251,29 @@
           "Bad <init> method call");
       return;
     }
+
+    // Make sure that this call is not jumped over.
+    if (bci < furthest_jump()) {
+      verify_error(ErrorContext::bad_code(bci),
+                   "Bad <init> method call from inside of a branch");
+      return;
+    }
+
+    // Make sure that this call is not done from within a TRY block because
+    // that can result in returning an incomplete object.  Simply checking
+    // (bci >= start_pc) also ensures that this call is not done after a TRY
+    // block.  That is also illegal because this call must be the first Java
+    // statement in the constructor.
+    ExceptionTable exhandlers(_method());
+    int exlength = exhandlers.length();
+    for(int i = 0; i < exlength; i++) {
+      if (bci >= exhandlers.start_pc(i)) {
+        verify_error(ErrorContext::bad_code(bci),
+                     "Bad <init> method call from after the start of a try block");
+        return;
+      }
+    }
+
     current_frame->initialize_object(type, current_type());
     *this_uninit = true;
   } else if (type.is_uninitialized()) {
@@ -2285,16 +2311,19 @@
         vmSymbols::object_initializer_name(),
         cp->signature_ref_at(bcs->get_index_u2()),
         Klass::normal);
-      instanceKlassHandle mh(THREAD, m->method_holder());
-      if (m->is_protected() && !mh->is_same_class_package(_klass())) {
-        bool assignable = current_type().is_assignable_from(
-          objectref_type, this, CHECK_VERIFY(this));
-        if (!assignable) {
-          verify_error(ErrorContext::bad_type(bci,
-              TypeOrigin::cp(new_class_index, objectref_type),
-              TypeOrigin::implicit(current_type())),
-              "Bad access to protected <init> method");
-          return;
+      // Do nothing if method is not found.  Let resolution detect the error.
+      if (m != NULL) {
+        instanceKlassHandle mh(THREAD, m->method_holder());
+        if (m->is_protected() && !mh->is_same_class_package(_klass())) {
+          bool assignable = current_type().is_assignable_from(
+            objectref_type, this, CHECK_VERIFY(this));
+          if (!assignable) {
+            verify_error(ErrorContext::bad_type(bci,
+                TypeOrigin::cp(new_class_index, objectref_type),
+                TypeOrigin::implicit(current_type())),
+                "Bad access to protected <init> method");
+            return;
+          }
         }
       }
     }
--- a/hotspot/src/share/vm/classfile/verifier.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -258,6 +258,9 @@
 
   ErrorContext _error_context;  // contains information about an error
 
+  // Used to detect illegal jumps over calls to super() and this() in ctors.
+  int32_t _furthest_jump;
+
   void verify_method(methodHandle method, TRAPS);
   char* generate_code_data(methodHandle m, u4 code_length, TRAPS);
   void verify_exception_handler_table(u4 code_length, char* code_data,
@@ -403,6 +406,20 @@
   Symbol* create_temporary_symbol(const char *s, int length, TRAPS);
 
   TypeOrigin ref_ctx(const char* str, TRAPS);
+
+  // Keep track of the furthest branch done in a method to make sure that
+  // there are no branches over calls to super() or this() from inside of
+  // a constructor.
+  int32_t furthest_jump() { return _furthest_jump; }
+
+  void set_furthest_jump(int32_t target) {
+    _furthest_jump = target;
+  }
+
+  void update_furthest_jump(int32_t target) {
+    if (target > _furthest_jump) _furthest_jump = target;
+  }
+
 };
 
 inline int ClassVerifier::change_sig_to_verificationType(
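Taken together with the stackMapTable.cpp and verifier.cpp hunks above, the bookkeeping works roughly as sketched below (all names are from this change):

  // verify_method():                      set_furthest_jump(0);
  // each verified jump target `target`:   update_furthest_jump(target);
  // verify of an <init> call at `bci`:    if (bci < furthest_jump()) reject with
  //                                       "Bad <init> method call from inside of a branch"
  //
  // i.e. a super()/this() call is only accepted if no branch seen so far can
  // jump past it, so a constructor cannot be tricked into skipping its
  // superclass initialization.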
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -573,6 +573,7 @@
   template(serializeAgentPropertiesToByteArray_name,   "serializeAgentPropertiesToByteArray")                     \
   template(classRedefinedCount_name,                   "classRedefinedCount")                                     \
   template(classLoader_name,                           "classLoader")                                             \
+  template(componentType_name,                         "componentType")                                           \
                                                                                                                   \
   /* trace signatures */                                                                                          \
   TRACE_TEMPLATES(template)                                                                                       \
--- a/hotspot/src/share/vm/code/codeCache.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -331,6 +331,11 @@
 // Walk the list of methods which might contain non-perm oops.
 void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
   assert_locked_or_safepoint(CodeCache_lock);
+
+  if (UseG1GC) {
+    return;
+  }
+
   debug_only(mark_scavenge_root_nmethods());
 
   for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@@ -356,6 +361,11 @@
 
 void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
+
+  if (UseG1GC) {
+    return;
+  }
+
   nm->set_on_scavenge_root_list();
   nm->set_scavenge_root_link(_scavenge_root_nmethods);
   set_scavenge_root_nmethods(nm);
@@ -364,6 +374,11 @@
 
 void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
+
+  if (UseG1GC) {
+    return;
+  }
+
   print_trace("drop_scavenge_root", nm);
   nmethod* last = NULL;
   nmethod* cur = scavenge_root_nmethods();
@@ -385,6 +400,11 @@
 
 void CodeCache::prune_scavenge_root_nmethods() {
   assert_locked_or_safepoint(CodeCache_lock);
+
+  if (UseG1GC) {
+    return;
+  }
+
   debug_only(mark_scavenge_root_nmethods());
 
   nmethod* last = NULL;
@@ -417,6 +437,10 @@
 
 #ifndef PRODUCT
 void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
+  if (UseG1GC) {
+    return;
+  }
+
   // While we are here, verify the integrity of the list.
   mark_scavenge_root_nmethods();
   for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@@ -457,9 +481,36 @@
 }
 #endif //PRODUCT
 
+void CodeCache::verify_clean_inline_caches() {
+#ifdef ASSERT
+  FOR_ALL_ALIVE_BLOBS(cb) {
+    if (cb->is_nmethod()) {
+      nmethod* nm = (nmethod*)cb;
+      assert(!nm->is_unloaded(), "Tautology");
+      nm->verify_clean_inline_caches();
+      nm->verify();
+    }
+  }
+#endif
+}
+
+void CodeCache::verify_icholder_relocations() {
+#ifdef ASSERT
+  // make sure that we aren't leaking icholders
+  int count = 0;
+  FOR_ALL_BLOBS(cb) {
+    if (cb->is_nmethod()) {
+      nmethod* nm = (nmethod*)cb;
+      count += nm->verify_icholder_relocations();
+    }
+  }
+
+  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
+         CompiledICHolder::live_count(), "must agree");
+#endif
+}
 
 void CodeCache::gc_prologue() {
-  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
 }
 
 void CodeCache::gc_epilogue() {
@@ -472,41 +523,15 @@
         nm->cleanup_inline_caches();
       }
       DEBUG_ONLY(nm->verify());
-      nm->fix_oop_relocations();
+      DEBUG_ONLY(nm->verify_oop_relocations());
     }
   }
   set_needs_cache_clean(false);
   prune_scavenge_root_nmethods();
-  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
 
-#ifdef ASSERT
-  // make sure that we aren't leaking icholders
-  int count = 0;
-  FOR_ALL_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      RelocIterator iter((nmethod*)cb);
-      while(iter.next()) {
-        if (iter.type() == relocInfo::virtual_call_type) {
-          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
-            CompiledIC *ic = CompiledIC_at(iter.reloc());
-            if (TraceCompiledIC) {
-              tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
-              ic->print();
-            }
-            assert(ic->cached_icholder() != NULL, "must be non-NULL");
-            count++;
-          }
-        }
-      }
-    }
-  }
-
-  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
-         CompiledICHolder::live_count(), "must agree");
-#endif
+  verify_icholder_relocations();
 }
 
-
 void CodeCache::verify_oops() {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   VerifyOopClosure voc;
--- a/hotspot/src/share/vm/code/codeCache.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -134,10 +134,6 @@
   // to) any unmarked codeBlobs in the cache.  Sets "marked_for_unloading"
   // to "true" iff some code got unloaded.
   static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
-  static void oops_do(OopClosure* f) {
-    CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
-    blobs_do(&oopc);
-  }
   static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
   static void scavenge_root_nmethods_do(CodeBlobClosure* f);
 
@@ -173,6 +169,9 @@
   static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
   static void clear_inline_caches();             // clear all inline caches
 
+  static void verify_clean_inline_caches();
+  static void verify_icholder_relocations();
+
   // Deoptimization
   static int  mark_for_deoptimization(DepChange& changes);
 #ifdef HOTSWAP
--- a/hotspot/src/share/vm/code/compiledIC.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/code/compiledIC.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -99,13 +99,13 @@
   }
 
   {
-  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
 #ifdef ASSERT
-  CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
-  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
+    CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
+    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
 #endif
-  _ic_call->set_destination_mt_safe(entry_point);
-}
+    _ic_call->set_destination_mt_safe(entry_point);
+  }
 
   if (is_optimized() || is_icstub) {
     // Optimized call sites don't have a cache value and ICStub call
@@ -159,10 +159,24 @@
 //-----------------------------------------------------------------------------
 // High-level access to an inline cache. Guaranteed to be MT-safe.
 
+void CompiledIC::initialize_from_iter(RelocIterator* iter) {
+  assert(iter->addr() == _ic_call->instruction_address(), "must find ic_call");
+
+  if (iter->type() == relocInfo::virtual_call_type) {
+    virtual_call_Relocation* r = iter->virtual_call_reloc();
+    _is_optimized = false;
+    _value = nativeMovConstReg_at(r->cached_value());
+  } else {
+    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
+    _is_optimized = true;
+    _value = NULL;
+  }
+}
+
 CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
   : _ic_call(call)
 {
-  address ic_call = call->instruction_address();
+  address ic_call = _ic_call->instruction_address();
 
   assert(ic_call != NULL, "ic_call address must be set");
   assert(nm != NULL, "must pass nmethod");
@@ -173,15 +187,21 @@
   bool ret = iter.next();
   assert(ret == true, "relocInfo must exist at this address");
   assert(iter.addr() == ic_call, "must find ic_call");
-  if (iter.type() == relocInfo::virtual_call_type) {
-    virtual_call_Relocation* r = iter.virtual_call_reloc();
-    _is_optimized = false;
-    _value = nativeMovConstReg_at(r->cached_value());
-  } else {
-    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
-    _is_optimized = true;
-    _value = NULL;
-  }
+
+  initialize_from_iter(&iter);
+}
+
+CompiledIC::CompiledIC(RelocIterator* iter)
+  : _ic_call(nativeCall_at(iter->addr()))
+{
+  address ic_call = _ic_call->instruction_address();
+
+  nmethod* nm = iter->code();
+  assert(ic_call != NULL, "ic_call address must be set");
+  assert(nm != NULL, "must pass nmethod");
+  assert(nm->contains(ic_call), "must be in nmethod");
+
+  initialize_from_iter(iter);
 }
 
 bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
@@ -509,7 +529,7 @@
 void CompiledStaticCall::set_to_clean() {
   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
   // Reset call site
-  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
 #ifdef ASSERT
   CodeBlob* cb = CodeCache::find_blob_unsafe(this);
   assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
--- a/hotspot/src/share/vm/code/compiledIC.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/code/compiledIC.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -150,6 +150,9 @@
   bool          _is_optimized;  // an optimized virtual call (i.e., no compiled IC)
 
   CompiledIC(nmethod* nm, NativeCall* ic_call);
+  CompiledIC(RelocIterator* iter);
+
+  void initialize_from_iter(RelocIterator* iter);
 
   static bool is_icholder_entry(address entry);
 
@@ -183,6 +186,7 @@
   friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
   friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
   friend CompiledIC* CompiledIC_at(Relocation* call_site);
+  friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
 
   // This is used to release CompiledICHolder*s from nmethods that
   // are about to be freed.  The callsite might contain other stale
@@ -263,6 +267,13 @@
   return c_ic;
 }
 
+inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
+  assert(reloc_iter->type() == relocInfo::virtual_call_type ||
+      reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
+  CompiledIC* c_ic = new CompiledIC(reloc_iter);
+  c_ic->verify();
+  return c_ic;
+}
 
 //-----------------------------------------------------------------------------
 // The CompiledStaticCall represents a call to a static method in the compiled
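A typical call site for the new iterator-based factory, mirroring the nmethod.cpp changes below; nm and the enclosing ResourceMark are the only assumptions here:

  // Build the CompiledIC straight from the iterator's current position instead
  // of re-scanning the nmethod's relocations for the call-site address.
  ResourceMark rm;
  RelocIterator iter(nm);   // nm is some nmethod*, under CompiledIC_lock or at a safepoint
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type ||
        iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->print();          // or is_clean(), set_to_clean(), ...
    }
  }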
--- a/hotspot/src/share/vm/code/nmethod.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -51,6 +51,8 @@
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
+unsigned char nmethod::_global_unloading_clock = 0;
+
 #ifdef DTRACE_ENABLED
 
 // Only bother with this argument setup if dtrace is available
@@ -446,6 +448,7 @@
 // Fill in default values for various flag fields
 void nmethod::init_defaults() {
   _state                      = in_use;
+  _unloading_clock            = 0;
   _marked_for_reclamation     = 0;
   _has_flushed_dependencies   = 0;
   _has_unsafe_access          = 0;
@@ -464,7 +467,11 @@
   _oops_do_mark_link       = NULL;
   _jmethod_id              = NULL;
   _osr_link                = NULL;
-  _scavenge_root_link      = NULL;
+  if (UseG1GC) {
+    _unloading_next        = NULL;
+  } else {
+    _scavenge_root_link    = NULL;
+  }
   _scavenge_root_state     = 0;
   _compiler                = NULL;
 #if INCLUDE_RTM_OPT
@@ -1146,7 +1153,7 @@
     switch(iter.type()) {
       case relocInfo::virtual_call_type:
       case relocInfo::opt_virtual_call_type: {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
+        CompiledIC *ic = CompiledIC_at(&iter);
         // Ok, to lookup references to zombies here
         CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
         if( cb != NULL && cb->is_nmethod() ) {
@@ -1170,6 +1177,77 @@
   }
 }
 
+void nmethod::verify_clean_inline_caches() {
+  assert_locked_or_safepoint(CompiledIC_lock);
+
+  // If the method is not entrant or zombie then a JMP is plastered over the
+  // first few bytes.  If an oop in the old code was there, that oop
+  // should not get GC'd.  Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (!is_in_use()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // This means that the low_boundary is going to be a little too high.
+    // This shouldn't matter, since oops of non-entrant methods are never used.
+    // In fact, why are we bothering to look at oops in a non-entrant method??
+  }
+
+  ResourceMark rm;
+  RelocIterator iter(this, low_boundary);
+  while(iter.next()) {
+    switch(iter.type()) {
+      case relocInfo::virtual_call_type:
+      case relocInfo::opt_virtual_call_type: {
+        CompiledIC *ic = CompiledIC_at(&iter);
+        // Ok, to lookup references to zombies here
+        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
+        if( cb != NULL && cb->is_nmethod() ) {
+          nmethod* nm = (nmethod*)cb;
+          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
+            assert(ic->is_clean(), "IC should be clean");
+          }
+        }
+        break;
+      }
+      case relocInfo::static_call_type: {
+        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
+        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
+        if( cb != NULL && cb->is_nmethod() ) {
+          nmethod* nm = (nmethod*)cb;
+          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
+            assert(csc->is_clean(), "IC should be clean");
+          }
+        }
+        break;
+      }
+    }
+  }
+}
+
+int nmethod::verify_icholder_relocations() {
+  int count = 0;
+
+  RelocIterator iter(this);
+  while(iter.next()) {
+    if (iter.type() == relocInfo::virtual_call_type) {
+      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
+        CompiledIC *ic = CompiledIC_at(&iter);
+        if (TraceCompiledIC) {
+          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
+          ic->print();
+        }
+        assert(ic->cached_icholder() != NULL, "must be non-NULL");
+        count++;
+      }
+    }
+  }
+
+  return count;
+}
+
 // This is a private interface with the sweeper.
 void nmethod::mark_as_seen_on_stack() {
   assert(is_alive(), "Must be an alive method");
@@ -1202,6 +1280,23 @@
   mdo->inc_decompile_count();
 }
 
+void nmethod::increase_unloading_clock() {
+  _global_unloading_clock++;
+  if (_global_unloading_clock == 0) {
+    // _nmethods are allocated with _unloading_clock == 0,
+    // so 0 is never used as a clock value.
+    _global_unloading_clock = 1;
+  }
+}
+
+void nmethod::set_unloading_clock(unsigned char unloading_clock) {
+  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
+}
+
+unsigned char nmethod::unloading_clock() {
+  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
+}
+
 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
 
   post_compiled_method_unload();
@@ -1247,6 +1342,10 @@
     // for later on.
     CodeCache::set_needs_cache_clean(true);
   }
+
+  // Unregister must be done before the state change
+  Universe::heap()->unregister_nmethod(this);
+
   _state = unloaded;
 
   // Log the unloading.
@@ -1590,6 +1689,35 @@
   set_unload_reported();
 }
 
+static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
+  if (ic->is_icholder_call()) {
+    // The only exception is compiledICHolder oops which may
+    // yet be marked below. (We check this further below).
+    CompiledICHolder* cichk_oop = ic->cached_icholder();
+    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
+        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
+      return;
+    }
+  } else {
+    Metadata* ic_oop = ic->cached_metadata();
+    if (ic_oop != NULL) {
+      if (ic_oop->is_klass()) {
+        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
+          return;
+        }
+      } else if (ic_oop->is_method()) {
+        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
+          return;
+        }
+      } else {
+        ShouldNotReachHere();
+      }
+    }
+  }
+
+  ic->set_to_clean();
+}
+
 // This is called at the end of the strong tracing/marking phase of a
 // GC to unload an nmethod if it contains otherwise unreachable
 // oops.
@@ -1632,32 +1760,8 @@
     RelocIterator iter(this, low_boundary);
     while(iter.next()) {
       if (iter.type() == relocInfo::virtual_call_type) {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
-        if (ic->is_icholder_call()) {
-          // The only exception is compiledICHolder oops which may
-          // yet be marked below. (We check this further below).
-          CompiledICHolder* cichk_oop = ic->cached_icholder();
-          if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
-              cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
-            continue;
-          }
-        } else {
-          Metadata* ic_oop = ic->cached_metadata();
-          if (ic_oop != NULL) {
-            if (ic_oop->is_klass()) {
-              if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
-                continue;
-              }
-            } else if (ic_oop->is_method()) {
-              if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
-                continue;
-              }
-            } else {
-              ShouldNotReachHere();
-            }
-          }
-        }
-        ic->set_to_clean();
+        CompiledIC *ic = CompiledIC_at(&iter);
+        clean_ic_if_metadata_is_dead(ic, is_alive);
       }
     }
   }
@@ -1695,6 +1799,175 @@
   verify_metadata_loaders(low_boundary, is_alive);
 }
 
+template <class CompiledICorStaticCall>
+static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
+  // Ok, to lookup references to zombies here
+  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
+  if (cb != NULL && cb->is_nmethod()) {
+    nmethod* nm = (nmethod*)cb;
+
+    if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
+      // The nmethod has not been processed yet.
+      return true;
+    }
+
+    // Clean inline caches pointing to both zombie and not_entrant methods
+    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
+      ic->set_to_clean();
+      assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT " not clean %s", from, from->method()->name_and_sig_as_C_string()));
+    }
+  }
+
+  return false;
+}
+
+static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
+  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
+}
+
+static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
+  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
+}
+
+bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
+  ResourceMark rm;
+
+  // Make sure the oop's ready to receive visitors
+  assert(!is_zombie() && !is_unloaded(),
+         "should not call follow on zombie or unloaded nmethod");
+
+  // If the method is not entrant then a JMP is plastered over the
+  // first few bytes.  If an oop in the old code was there, that oop
+  // should not get GC'd.  Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+
+  // The RedefineClasses() API can cause the class unloading invariant
+  // to no longer be true. See jvmtiExport.hpp for details.
+  // Also, leave a debugging breadcrumb in local flag.
+  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
+  if (a_class_was_redefined) {
+    // This set of the unloading_occurred flag is done before the
+    // call to post_compiled_method_unload() so that the unloading
+    // of this nmethod is reported.
+    unloading_occurred = true;
+  }
+
+  // Exception cache
+  clean_exception_cache(is_alive);
+
+  bool is_unloaded = false;
+  bool postponed = false;
+
+  RelocIterator iter(this, low_boundary);
+  while(iter.next()) {
+
+    switch (iter.type()) {
+
+    case relocInfo::virtual_call_type:
+      if (unloading_occurred) {
+        // If class unloading occurred we first iterate over all inline caches and
+        // clear ICs where the cached oop is referring to an unloaded klass or method.
+        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
+      }
+
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      break;
+
+    case relocInfo::opt_virtual_call_type:
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      break;
+
+    case relocInfo::static_call_type:
+      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
+      break;
+
+    case relocInfo::oop_type:
+      if (!is_unloaded) {
+        // Unload check
+        oop_Relocation* r = iter.oop_reloc();
+        // Traverse those oops directly embedded in the code.
+        // Other oops (oop_index>0) are seen as part of scopes_oops.
+        assert(1 == (r->oop_is_immediate()) +
+                  (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+              "oop must be found in exactly one place");
+        if (r->oop_is_immediate() && r->oop_value() != NULL) {
+          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
+            is_unloaded = true;
+          }
+        }
+      }
+      break;
+
+    }
+  }
+
+  if (is_unloaded) {
+    return postponed;
+  }
+
+  // Scopes
+  for (oop* p = oops_begin(); p < oops_end(); p++) {
+    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
+    if (can_unload(is_alive, p, unloading_occurred)) {
+      is_unloaded = true;
+      break;
+    }
+  }
+
+  if (is_unloaded) {
+    return postponed;
+  }
+
+  // Ensure that all metadata is still alive
+  verify_metadata_loaders(low_boundary, is_alive);
+
+  return postponed;
+}
+
+void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
+  ResourceMark rm;
+
+  // Make sure the oop's ready to receive visitors
+  assert(!is_zombie(),
+         "should not call follow on zombie nmethod");
+
+  // If the method is not entrant then a JMP is plastered over the
+  // first few bytes.  If an oop in the old code was there, that oop
+  // should not get GC'd.  Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+
+  RelocIterator iter(this, low_boundary);
+  while(iter.next()) {
+
+    switch (iter.type()) {
+
+    case relocInfo::virtual_call_type:
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      break;
+
+    case relocInfo::opt_virtual_call_type:
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      break;
+
+    case relocInfo::static_call_type:
+      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
+      break;
+    }
+  }
+}
+
 #ifdef ASSERT
 
 class CheckClass : AllStatic {
@@ -1741,7 +2014,7 @@
     // compiled code is maintaining a link to dead metadata.
     address static_call_addr = NULL;
     if (iter.type() == relocInfo::opt_virtual_call_type) {
-      CompiledIC* cic = CompiledIC_at(iter.reloc());
+      CompiledIC* cic = CompiledIC_at(&iter);
       if (!cic->is_call_to_interpreted()) {
         static_call_addr = iter.addr();
       }
@@ -1793,7 +2066,7 @@
         }
       } else if (iter.type() == relocInfo::virtual_call_type) {
         // Check compiledIC holders associated with this nmethod
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
+        CompiledIC *ic = CompiledIC_at(&iter);
         if (ic->is_icholder_call()) {
           CompiledICHolder* cichk = ic->cached_icholder();
           f(cichk->holder_method());
@@ -1911,7 +2184,7 @@
     assert(cur != NULL, "not NULL-terminated");
     nmethod* next = cur->_oops_do_mark_link;
     cur->_oops_do_mark_link = NULL;
-    cur->fix_oop_relocations();
+    cur->verify_oop_relocations();
     NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
     cur = next;
   }
@@ -2479,6 +2752,10 @@
 };
 
 void nmethod::verify_scavenge_root_oops() {
+  if (UseG1GC) {
+    return;
+  }
+
   if (!on_scavenge_root_list()) {
     // Actually look inside, to verify the claim that it's clean.
     DebugScavengeRoot debug_scavenge_root(this);
@@ -2922,7 +3199,7 @@
     case relocInfo::virtual_call_type:
     case relocInfo::opt_virtual_call_type: {
       VerifyMutexLocker mc(CompiledIC_lock);
-      CompiledIC_at(iter.reloc())->print();
+      CompiledIC_at(&iter)->print();
       break;
     }
     case relocInfo::static_call_type:
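A hedged sketch of how a collector is expected to drive the new parallel entry points; the driver shape is an assumption, while the nmethod calls are the ones added above:

  // Once per unloading pass:
  nmethod::increase_unloading_clock();              // open a new epoch (never 0)

  // Phase 1, in parallel: each worker claims nmethods and cleans what it can.
  bool postponed = nm->do_unloading_parallel(is_alive, unloading_occurred);
  nm->set_unloading_clock(nmethod::global_unloading_clock());  // now "processed"

  // Phase 2, after all workers finish phase 1: inline caches that pointed at
  // nmethods which had not yet been processed in phase 1 are revisited.
  if (postponed) {
    nm->do_unloading_parallel_postponed(is_alive, unloading_occurred);
  }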
--- a/hotspot/src/share/vm/code/nmethod.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -111,6 +111,11 @@
   friend class NMethodSweeper;
   friend class CodeCache;  // scavengable oops
  private:
+
+  // GC support to help figure out if an nmethod has been
+  // cleaned/unloaded by the current GC.
+  static unsigned char _global_unloading_clock;
+
   // Shared fields for all nmethod's
   Method*   _method;
   int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
@@ -118,7 +123,13 @@
 
   // To support simple linked-list chaining of nmethods:
   nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head
-  nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
+
+  union {
+    // Used by G1 to chain nmethods.
+    nmethod* _unloading_next;
+    // Used by non-G1 GCs to chain nmethods.
+    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
+  };
 
   static nmethod* volatile _oops_do_mark_nmethods;
   nmethod*        volatile _oops_do_mark_link;
@@ -180,6 +191,8 @@
   // Protected by Patching_lock
   volatile unsigned char _state;             // {alive, not_entrant, zombie, unloaded}
 
+  volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod
+
 #ifdef ASSERT
   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 #endif
@@ -437,6 +450,15 @@
   bool  unload_reported()                         { return _unload_reported; }
   void  set_unload_reported()                     { _unload_reported = true; }
 
+  void set_unloading_next(nmethod* next)          { _unloading_next = next; }
+  nmethod* unloading_next()                       { return _unloading_next; }
+
+  static unsigned char global_unloading_clock()   { return _global_unloading_clock; }
+  static void increase_unloading_clock();
+
+  void set_unloading_clock(unsigned char unloading_clock);
+  unsigned char unloading_clock();
+
   bool  is_marked_for_deoptimization() const      { return _marked_for_deoptimization; }
   void  mark_for_deoptimization()                 { _marked_for_deoptimization = true; }
 
@@ -552,6 +574,10 @@
     return (addr >= code_begin() && addr < verified_entry_point());
   }
 
+  // Verify calls to dead methods have been cleaned.
+  void verify_clean_inline_caches();
+  // Verify and count cached icholder relocations.
+  int  verify_icholder_relocations();
   // Check that all metadata is still alive
   void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
 
@@ -577,6 +603,10 @@
 
   // GC support
   void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
+  //  The parallel versions are used by G1.
+  bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
+  void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
+  //  Unload an nmethod if the *root object is dead.
   bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
 
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
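For clarity, the union above means an nmethod is threaded onto at most one of the two lists, chosen by the collector in use; this is a reading of the change, not new behavior:

  //   UseG1GC   -> _unloading_next    : chained via set_unloading_next() while a
  //                                     parallel unloading pass is in flight
  //   otherwise -> _scavenge_root_link: the CodeCache::scavenge_root_nmethods
  //                                     list, which the G1 paths now skip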
--- a/hotspot/src/share/vm/code/stubs.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/code/stubs.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,21 +27,6 @@
 
 #include "asm/codeBuffer.hpp"
 #include "memory/allocation.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 // The classes in this file provide a simple framework for the
 // management of little pieces of machine code - or stubs -
--- a/hotspot/src/share/vm/code/vtableStubs.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "code/vtableStubs.hpp"
+#include "compiler/compileBroker.hpp"
 #include "compiler/disassembler.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
@@ -62,6 +63,7 @@
    // If changing the name, update the other file accordingly.
     BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
+      CompileBroker::handle_full_code_cache();
       return NULL;
     }
     _chunk = blob->content_begin();
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1048,7 +1048,7 @@
   }
 
   // Let go of Threads_lock before yielding
-  os::yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
+  os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
 
   return compiler_thread;
 }
@@ -2123,6 +2123,7 @@
   ResourceMark rm;
   char* method_name = method->name()->as_C_string();
   strncpy(_last_method_compiled, method_name, CompileBroker::name_buffer_length);
+  _last_method_compiled[CompileBroker::name_buffer_length - 1] = '\0'; // ensure null terminated
   char current_method[CompilerCounters::cmname_buffer_length];
   size_t maxLen = CompilerCounters::cmname_buffer_length;
 
--- a/hotspot/src/share/vm/compiler/disassembler.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/compiler/disassembler.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -30,6 +30,7 @@
 #include "memory/cardTableModRefBS.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
 #ifdef TARGET_ARCH_x86
--- a/hotspot/src/share/vm/compiler/disassembler.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/compiler/disassembler.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,21 +27,6 @@
 
 #include "asm/codeBuffer.hpp"
 #include "runtime/globals.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 class decode_env;
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -29,6 +29,7 @@
 #include "memory/sharedHeap.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 
 template <>
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1354 +0,0 @@
-/*
- * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
-#include "gc_implementation/shared/gcStats.hpp"
-#include "memory/defNewGeneration.hpp"
-#include "memory/genCollectedHeap.hpp"
-#include "runtime/thread.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
-elapsedTimer CMSAdaptiveSizePolicy::_concurrent_timer;
-elapsedTimer CMSAdaptiveSizePolicy::_STW_timer;
-
-// Defined if the granularity of the time measurements is potentially too large.
-#define CLOCK_GRANULARITY_TOO_LARGE
-
-CMSAdaptiveSizePolicy::CMSAdaptiveSizePolicy(size_t init_eden_size,
-                                             size_t init_promo_size,
-                                             size_t init_survivor_size,
-                                             double max_gc_minor_pause_sec,
-                                             double max_gc_pause_sec,
-                                             uint gc_cost_ratio) :
-  AdaptiveSizePolicy(init_eden_size,
-                     init_promo_size,
-                     init_survivor_size,
-                     max_gc_pause_sec,
-                     gc_cost_ratio) {
-
-  clear_internal_time_intervals();
-
-  _processor_count = os::active_processor_count();
-
-  if (CMSConcurrentMTEnabled && (ConcGCThreads > 1)) {
-    assert(_processor_count > 0, "Processor count is suspect");
-    _concurrent_processor_count = MIN2((uint) ConcGCThreads,
-                                       (uint) _processor_count);
-  } else {
-    _concurrent_processor_count = 1;
-  }
-
-  _avg_concurrent_time  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_concurrent_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_concurrent_gc_cost = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-
-  _avg_initial_pause    = new AdaptivePaddedAverage(AdaptiveTimeWeight,
-                                                    PausePadding);
-  _avg_remark_pause     = new AdaptivePaddedAverage(AdaptiveTimeWeight,
-                                                    PausePadding);
-
-  _avg_cms_STW_time     = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_cms_STW_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-
-  _avg_cms_free         = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_cms_free_at_sweep = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_cms_promo        = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-
-  // Mark-sweep-compact
-  _avg_msc_pause        = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_msc_interval     = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_msc_gc_cost      = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-
-  // Mark-sweep
-  _avg_ms_pause = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_ms_interval      = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_ms_gc_cost       = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-
-  // Variables that estimate pause times as a function of generation
-  // size.
-  _remark_pause_old_estimator =
-    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
-  _initial_pause_old_estimator =
-    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
-  _remark_pause_young_estimator =
-    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
-  _initial_pause_young_estimator =
-    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
-
-  // Alignment comes from that used in ReservedSpace.
-  _generation_alignment = os::vm_allocation_granularity();
-
-  // Start the concurrent timer here so that the first
-  // concurrent_phases_begin() measures a finite mutator
-  // time.  A finite mutator time is used to determine
-  // if a concurrent collection has been started.  If this
-  // proves to be a problem, use some explicit flag to
-  // signal that a concurrent collection has been started.
-  _concurrent_timer.start();
-  _STW_timer.start();
-}
-
-double CMSAdaptiveSizePolicy::concurrent_processor_fraction() {
-  // For now assume no other daemon threads are taking alway
-  // cpu's from the application.
-  return ((double) _concurrent_processor_count / (double) _processor_count);
-}
-
-double CMSAdaptiveSizePolicy::concurrent_collection_cost(
-                                                  double interval_in_seconds) {
-  //  When the precleaning and sweeping phases use multiple
-  // threads, change one_processor_fraction to
-  // concurrent_processor_fraction().
-  double one_processor_fraction = 1.0 / ((double) processor_count());
-  double concurrent_cost =
-    collection_cost(_latest_cms_concurrent_marking_time_secs,
-                interval_in_seconds) * concurrent_processor_fraction() +
-    collection_cost(_latest_cms_concurrent_precleaning_time_secs,
-                interval_in_seconds) * one_processor_fraction +
-    collection_cost(_latest_cms_concurrent_sweeping_time_secs,
-                interval_in_seconds) * one_processor_fraction;
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "\nCMSAdaptiveSizePolicy::scaled_concurrent_collection_cost(%f) "
-      "_latest_cms_concurrent_marking_cost %f "
-      "_latest_cms_concurrent_precleaning_cost %f "
-      "_latest_cms_concurrent_sweeping_cost %f "
-      "concurrent_processor_fraction %f "
-      "concurrent_cost %f ",
-      interval_in_seconds,
-      collection_cost(_latest_cms_concurrent_marking_time_secs,
-        interval_in_seconds),
-      collection_cost(_latest_cms_concurrent_precleaning_time_secs,
-        interval_in_seconds),
-      collection_cost(_latest_cms_concurrent_sweeping_time_secs,
-        interval_in_seconds),
-      concurrent_processor_fraction(),
-      concurrent_cost);
-  }
-  return concurrent_cost;
-}
-
-double CMSAdaptiveSizePolicy::concurrent_collection_time() {
-  double latest_cms_sum_concurrent_phases_time_secs =
-    _latest_cms_concurrent_marking_time_secs +
-    _latest_cms_concurrent_precleaning_time_secs +
-    _latest_cms_concurrent_sweeping_time_secs;
-  return latest_cms_sum_concurrent_phases_time_secs;
-}
-
-double CMSAdaptiveSizePolicy::scaled_concurrent_collection_time() {
-  //  When the precleaning and sweeping phases use multiple
-  // threads, change one_processor_fraction to
-  // concurrent_processor_fraction().
-  double one_processor_fraction = 1.0 / ((double) processor_count());
-  double latest_cms_sum_concurrent_phases_time_secs =
-    _latest_cms_concurrent_marking_time_secs * concurrent_processor_fraction() +
-    _latest_cms_concurrent_precleaning_time_secs * one_processor_fraction +
-    _latest_cms_concurrent_sweeping_time_secs * one_processor_fraction ;
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "\nCMSAdaptiveSizePolicy::scaled_concurrent_collection_time "
-      "_latest_cms_concurrent_marking_time_secs %f "
-      "_latest_cms_concurrent_precleaning_time_secs %f "
-      "_latest_cms_concurrent_sweeping_time_secs %f "
-      "concurrent_processor_fraction %f "
-      "latest_cms_sum_concurrent_phases_time_secs %f ",
-      _latest_cms_concurrent_marking_time_secs,
-      _latest_cms_concurrent_precleaning_time_secs,
-      _latest_cms_concurrent_sweeping_time_secs,
-      concurrent_processor_fraction(),
-      latest_cms_sum_concurrent_phases_time_secs);
-  }
-  return latest_cms_sum_concurrent_phases_time_secs;
-}
-
-void CMSAdaptiveSizePolicy::update_minor_pause_old_estimator(
-    double minor_pause_in_ms) {
-  // Get the equivalent of the free space
-  // that is available for promotions in the CMS generation
-  // and use that to update _minor_pause_old_estimator
-
-  // Don't implement this until it is needed. A warning is
-  // printed if _minor_pause_old_estimator is used.
-}
-
-void CMSAdaptiveSizePolicy::concurrent_marking_begin() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print(" ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print(": concurrent_marking_begin ");
-  }
-  //  Update the interval time
-  _concurrent_timer.stop();
-  _latest_cms_collection_end_to_collection_start_secs = _concurrent_timer.seconds();
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_marking_begin: "
-    "mutator time %f", _latest_cms_collection_end_to_collection_start_secs);
-  }
-  _concurrent_timer.reset();
-  _concurrent_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::concurrent_marking_end() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_marking_end()");
-  }
-
-  _concurrent_timer.stop();
-  _latest_cms_concurrent_marking_time_secs = _concurrent_timer.seconds();
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("\n CMSAdaptiveSizePolicy::concurrent_marking_end"
-      ":concurrent marking time (s) %f",
-      _latest_cms_concurrent_marking_time_secs);
-  }
-}
-
-void CMSAdaptiveSizePolicy::concurrent_precleaning_begin() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::concurrent_precleaning_begin()");
-  }
-  _concurrent_timer.reset();
-  _concurrent_timer.start();
-}
-
-
-void CMSAdaptiveSizePolicy::concurrent_precleaning_end() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_precleaning_end()");
-  }
-
-  _concurrent_timer.stop();
-  // May be set again by a second call during the same collection.
-  _latest_cms_concurrent_precleaning_time_secs = _concurrent_timer.seconds();
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("\n CMSAdaptiveSizePolicy::concurrent_precleaning_end"
-      ":concurrent precleaning time (s) %f",
-      _latest_cms_concurrent_precleaning_time_secs);
-  }
-}
-
-void CMSAdaptiveSizePolicy::concurrent_sweeping_begin() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::concurrent_sweeping_begin()");
-  }
-  _concurrent_timer.reset();
-  _concurrent_timer.start();
-}
-
-
-void CMSAdaptiveSizePolicy::concurrent_sweeping_end() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_sweeping_end()");
-  }
-
-  _concurrent_timer.stop();
-  _latest_cms_concurrent_sweeping_time_secs = _concurrent_timer.seconds();
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("\n CMSAdaptiveSizePolicy::concurrent_sweeping_end"
-      ":concurrent sweeping time (s) %f",
-      _latest_cms_concurrent_sweeping_time_secs);
-  }
-}
-
-void CMSAdaptiveSizePolicy::concurrent_phases_end(GCCause::Cause gc_cause,
-                                                  size_t cur_eden,
-                                                  size_t cur_promo) {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print(" ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print(": concurrent_phases_end ");
-  }
-
-  // Update the concurrent timer
-  _concurrent_timer.stop();
-
-  if (gc_cause != GCCause::_java_lang_system_gc ||
-      UseAdaptiveSizePolicyWithSystemGC) {
-
-    avg_cms_free()->sample(cur_promo);
-    double latest_cms_sum_concurrent_phases_time_secs =
-      concurrent_collection_time();
-
-    _avg_concurrent_time->sample(latest_cms_sum_concurrent_phases_time_secs);
-
-    // Cost of collection (unit-less)
-
-    // Total interval for collection.  May not be valid.  Tests
-    // below determine whether to use this.
-    //
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("\nCMSAdaptiveSizePolicy::concurrent_phases_end \n"
-        "_latest_cms_reset_end_to_initial_mark_start_secs %f \n"
-        "_latest_cms_initial_mark_start_to_end_time_secs %f \n"
-        "_latest_cms_remark_start_to_end_time_secs %f \n"
-        "_latest_cms_concurrent_marking_time_secs %f \n"
-        "_latest_cms_concurrent_precleaning_time_secs %f \n"
-        "_latest_cms_concurrent_sweeping_time_secs %f \n"
-        "latest_cms_sum_concurrent_phases_time_secs %f \n"
-        "_latest_cms_collection_end_to_collection_start_secs %f \n"
-        "concurrent_processor_fraction %f",
-        _latest_cms_reset_end_to_initial_mark_start_secs,
-        _latest_cms_initial_mark_start_to_end_time_secs,
-        _latest_cms_remark_start_to_end_time_secs,
-        _latest_cms_concurrent_marking_time_secs,
-        _latest_cms_concurrent_precleaning_time_secs,
-        _latest_cms_concurrent_sweeping_time_secs,
-        latest_cms_sum_concurrent_phases_time_secs,
-        _latest_cms_collection_end_to_collection_start_secs,
-        concurrent_processor_fraction());
-    }
-    double interval_in_seconds =
-      _latest_cms_initial_mark_start_to_end_time_secs +
-      _latest_cms_remark_start_to_end_time_secs +
-      latest_cms_sum_concurrent_phases_time_secs +
-      _latest_cms_collection_end_to_collection_start_secs;
-    assert(interval_in_seconds >= 0.0,
-      "Bad interval between cms collections");
-
-    // Sample for performance counter
-    avg_concurrent_interval()->sample(interval_in_seconds);
-
-    // STW costs (initial and remark pauses)
-    // Cost of collection (unit-less)
-    assert(_latest_cms_initial_mark_start_to_end_time_secs >= 0.0,
-      "Bad initial mark pause");
-    assert(_latest_cms_remark_start_to_end_time_secs >= 0.0,
-      "Bad remark pause");
-    double STW_time_in_seconds =
-      _latest_cms_initial_mark_start_to_end_time_secs +
-      _latest_cms_remark_start_to_end_time_secs;
-    double STW_collection_cost = 0.0;
-    if (interval_in_seconds > 0.0) {
-      // cost for the STW phases of the concurrent collection.
-      STW_collection_cost = STW_time_in_seconds / interval_in_seconds;
-      avg_cms_STW_gc_cost()->sample(STW_collection_cost);
-    }
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print("cmsAdaptiveSizePolicy::STW_collection_end: "
-        "STW gc cost: %f  average: %f", STW_collection_cost,
-        avg_cms_STW_gc_cost()->average());
-      gclog_or_tty->print_cr("  STW pause: %f (ms) STW period %f (ms)",
-        (double) STW_time_in_seconds * MILLIUNITS,
-        (double) interval_in_seconds * MILLIUNITS);
-    }
-
-    double concurrent_cost = 0.0;
-    if (latest_cms_sum_concurrent_phases_time_secs > 0.0) {
-      concurrent_cost = concurrent_collection_cost(interval_in_seconds);
-
-      avg_concurrent_gc_cost()->sample(concurrent_cost);
-      // Average this concurrent cost into all the other types of gc costs
-
-      if (PrintAdaptiveSizePolicy && Verbose) {
-        gclog_or_tty->print("cmsAdaptiveSizePolicy::concurrent_phases_end: "
-          "concurrent gc cost: %f  average: %f",
-          concurrent_cost,
-          _avg_concurrent_gc_cost->average());
-        gclog_or_tty->print_cr("  concurrent time: %f (ms) cms period %f (ms)"
-          " processor fraction: %f",
-          latest_cms_sum_concurrent_phases_time_secs * MILLIUNITS,
-          interval_in_seconds * MILLIUNITS,
-          concurrent_processor_fraction());
-      }
-    }
-    double total_collection_cost = STW_collection_cost + concurrent_cost;
-    avg_major_gc_cost()->sample(total_collection_cost);
-
-    // Gather information for estimating future behavior
-    double initial_pause_in_ms = _latest_cms_initial_mark_start_to_end_time_secs * MILLIUNITS;
-    double remark_pause_in_ms = _latest_cms_remark_start_to_end_time_secs * MILLIUNITS;
-
-    double cur_promo_size_in_mbytes = ((double)cur_promo)/((double)M);
-    initial_pause_old_estimator()->update(cur_promo_size_in_mbytes,
-      initial_pause_in_ms);
-    remark_pause_old_estimator()->update(cur_promo_size_in_mbytes,
-      remark_pause_in_ms);
-    major_collection_estimator()->update(cur_promo_size_in_mbytes,
-      total_collection_cost);
-
-    // This estimate uses the average eden size.  It could also
-    // have used the latest eden size.  Which is better?
-    double cur_eden_size_in_mbytes = ((double)cur_eden)/((double) M);
-    initial_pause_young_estimator()->update(cur_eden_size_in_mbytes,
-      initial_pause_in_ms);
-    remark_pause_young_estimator()->update(cur_eden_size_in_mbytes,
-      remark_pause_in_ms);
-  }
-
-  clear_internal_time_intervals();
-
-  set_first_after_collection();
-
-  // The concurrent phases keep track of their own mutator interval
-  // with this timer.  This allows the stop-the-world phase to
-  // be included in the mutator time so that the stop-the-world time
-  // is not double counted.  Reset and start it.
-  _concurrent_timer.reset();
-  _concurrent_timer.start();
-
-  // The mutator time between STW phases does not include the
-  // concurrent collection time.
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::checkpoint_roots_initial_begin() {
-  //  Update the interval time
-  _STW_timer.stop();
-  _latest_cms_reset_end_to_initial_mark_start_secs = _STW_timer.seconds();
-  // Reset for the initial mark
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::checkpoint_roots_initial_end(
-    GCCause::Cause gc_cause) {
-  _STW_timer.stop();
-
-  if (gc_cause != GCCause::_java_lang_system_gc ||
-      UseAdaptiveSizePolicyWithSystemGC) {
-    _latest_cms_initial_mark_start_to_end_time_secs = _STW_timer.seconds();
-    avg_initial_pause()->sample(_latest_cms_initial_mark_start_to_end_time_secs);
-
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print(
-        "cmsAdaptiveSizePolicy::checkpoint_roots_initial_end: "
-        "initial pause: %f ", _latest_cms_initial_mark_start_to_end_time_secs);
-    }
-  }
-
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::checkpoint_roots_final_begin() {
-  _STW_timer.stop();
-  _latest_cms_initial_mark_end_to_remark_start_secs = _STW_timer.seconds();
-  // Start accumulating time for the remark in the STW timer.
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::checkpoint_roots_final_end(
-    GCCause::Cause gc_cause) {
-  _STW_timer.stop();
-  if (gc_cause != GCCause::_java_lang_system_gc ||
-      UseAdaptiveSizePolicyWithSystemGC) {
-    // Total initial mark pause + remark pause.
-    _latest_cms_remark_start_to_end_time_secs = _STW_timer.seconds();
-    double STW_time_in_seconds = _latest_cms_initial_mark_start_to_end_time_secs +
-      _latest_cms_remark_start_to_end_time_secs;
-    double STW_time_in_ms = STW_time_in_seconds * MILLIUNITS;
-
-    avg_remark_pause()->sample(_latest_cms_remark_start_to_end_time_secs);
-
-    // Sample total for initial mark + remark
-    avg_cms_STW_time()->sample(STW_time_in_seconds);
-
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print("cmsAdaptiveSizePolicy::checkpoint_roots_final_end: "
-        "remark pause: %f", _latest_cms_remark_start_to_end_time_secs);
-    }
-
-  }
-  // Don't start the STW times here because the concurrent
-  // sweep and reset have not happened.
-  // The old comment above is kept in case it still applies, but for now
-  // start the STW timer because it is used by ms_collection_begin()
-  // and ms_collection_end() to get the sweep time if an MS is being
-  // done in the foreground.
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::msc_collection_begin() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print(" ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print(": msc_collection_begin ");
-  }
-  _STW_timer.stop();
-  _latest_cms_msc_end_to_msc_start_time_secs = _STW_timer.seconds();
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::msc_collection_begin: "
-      "mutator time %f",
-      _latest_cms_msc_end_to_msc_start_time_secs);
-  }
-  avg_msc_interval()->sample(_latest_cms_msc_end_to_msc_start_time_secs);
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::msc_collection_end(GCCause::Cause gc_cause) {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print(" ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print(": msc_collection_end ");
-  }
-  _STW_timer.stop();
-  if (gc_cause != GCCause::_java_lang_system_gc ||
-        UseAdaptiveSizePolicyWithSystemGC) {
-    double msc_pause_in_seconds = _STW_timer.seconds();
-    if ((_latest_cms_msc_end_to_msc_start_time_secs > 0.0) &&
-        (msc_pause_in_seconds > 0.0)) {
-      avg_msc_pause()->sample(msc_pause_in_seconds);
-      double mutator_time_in_seconds = 0.0;
-      if (_latest_cms_collection_end_to_collection_start_secs == 0.0) {
-        // This assertion may fail because of time stamp granularity.
-        // Comment it out and investigate it at a later time.  The large
-        // time stamp granularity occurs on some older linux systems.
-#ifndef CLOCK_GRANULARITY_TOO_LARGE
-        assert((_latest_cms_concurrent_marking_time_secs == 0.0) &&
-               (_latest_cms_concurrent_precleaning_time_secs == 0.0) &&
-               (_latest_cms_concurrent_sweeping_time_secs == 0.0),
-          "There should not be any concurrent time");
-#endif
-        // A concurrent collection did not start.  Mutator time
-        // between collections comes from the STW MSC timer.
-        mutator_time_in_seconds = _latest_cms_msc_end_to_msc_start_time_secs;
-      } else {
-        // The concurrent collection did start so count the mutator
-        // time to the start of the concurrent collection.  In this
-        // case the _latest_cms_msc_end_to_msc_start_time_secs measures
-        // the time between the initial mark or remark and the
-        // start of the MSC.  That has no real meaning.
-        mutator_time_in_seconds = _latest_cms_collection_end_to_collection_start_secs;
-      }
-
-      double latest_cms_sum_concurrent_phases_time_secs =
-        concurrent_collection_time();
-      double interval_in_seconds =
-        mutator_time_in_seconds +
-        _latest_cms_initial_mark_start_to_end_time_secs +
-        _latest_cms_remark_start_to_end_time_secs +
-        latest_cms_sum_concurrent_phases_time_secs +
-        msc_pause_in_seconds;
-
-      if (PrintAdaptiveSizePolicy && Verbose) {
-        gclog_or_tty->print_cr("  interval_in_seconds %f \n"
-          "     mutator_time_in_seconds %f \n"
-          "     _latest_cms_initial_mark_start_to_end_time_secs %f\n"
-          "     _latest_cms_remark_start_to_end_time_secs %f\n"
-          "     latest_cms_sum_concurrent_phases_time_secs %f\n"
-          "     msc_pause_in_seconds %f\n",
-          interval_in_seconds,
-          mutator_time_in_seconds,
-          _latest_cms_initial_mark_start_to_end_time_secs,
-          _latest_cms_remark_start_to_end_time_secs,
-          latest_cms_sum_concurrent_phases_time_secs,
-          msc_pause_in_seconds);
-      }
-
-      // The concurrent cost is wasted cost but it should be
-      // included.
-      double concurrent_cost = concurrent_collection_cost(interval_in_seconds);
-
-      // Initial mark and remark, also wasted.
-      double STW_time_in_seconds = _latest_cms_initial_mark_start_to_end_time_secs +
-        _latest_cms_remark_start_to_end_time_secs;
-      double STW_collection_cost =
-        collection_cost(STW_time_in_seconds, interval_in_seconds) +
-        concurrent_cost;
-
-      if (PrintAdaptiveSizePolicy && Verbose) {
-        gclog_or_tty->print_cr(" msc_collection_end:\n"
-          "_latest_cms_collection_end_to_collection_start_secs %f\n"
-          "_latest_cms_msc_end_to_msc_start_time_secs %f\n"
-          "_latest_cms_initial_mark_start_to_end_time_secs %f\n"
-          "_latest_cms_remark_start_to_end_time_secs %f\n"
-          "latest_cms_sum_concurrent_phases_time_secs %f\n",
-          _latest_cms_collection_end_to_collection_start_secs,
-          _latest_cms_msc_end_to_msc_start_time_secs,
-          _latest_cms_initial_mark_start_to_end_time_secs,
-          _latest_cms_remark_start_to_end_time_secs,
-          latest_cms_sum_concurrent_phases_time_secs);
-
-        gclog_or_tty->print_cr(" msc_collection_end: \n"
-          "latest_cms_sum_concurrent_phases_time_secs %f\n"
-          "STW_time_in_seconds %f\n"
-          "msc_pause_in_seconds %f\n",
-          latest_cms_sum_concurrent_phases_time_secs,
-          STW_time_in_seconds,
-          msc_pause_in_seconds);
-      }
-
-      double cost = concurrent_cost + STW_collection_cost +
-        collection_cost(msc_pause_in_seconds, interval_in_seconds);
-
-      _avg_msc_gc_cost->sample(cost);
-
-      // Average this msc cost into all the other types of gc costs
-      avg_major_gc_cost()->sample(cost);
-
-      // Sample for performance counter
-      _avg_msc_interval->sample(interval_in_seconds);
-      if (PrintAdaptiveSizePolicy && Verbose) {
-        gclog_or_tty->print("cmsAdaptiveSizePolicy::msc_collection_end: "
-          "MSC gc cost: %f  average: %f", cost,
-          _avg_msc_gc_cost->average());
-
-        double msc_pause_in_ms = msc_pause_in_seconds * MILLIUNITS;
-        gclog_or_tty->print_cr("  MSC pause: %f (ms) MSC period %f (ms)",
-          msc_pause_in_ms, (double) interval_in_seconds * MILLIUNITS);
-      }
-    }
-  }
-
-  clear_internal_time_intervals();
-
-  // Can this call be put into the epilogue?
-  set_first_after_collection();
-
-  // The concurrent phases keep track of their own mutator interval
-  // with this timer.  This allows the stop-the-world phase to
-  // be included in the mutator time so that the stop-the-world time
-  // is not double counted.  Reset and start it.
-  _concurrent_timer.stop();
-  _concurrent_timer.reset();
-  _concurrent_timer.start();
-
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::ms_collection_begin() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print(" ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print(": ms_collection_begin ");
-  }
-  _STW_timer.stop();
-  _latest_cms_ms_end_to_ms_start = _STW_timer.seconds();
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::ms_collection_begin: "
-      "mutator time %f",
-      _latest_cms_ms_end_to_ms_start);
-  }
-  avg_ms_interval()->sample(_STW_timer.seconds());
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::ms_collection_end(GCCause::Cause gc_cause) {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print(" ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print(": ms_collection_end ");
-  }
-  _STW_timer.stop();
-  if (gc_cause != GCCause::_java_lang_system_gc ||
-        UseAdaptiveSizePolicyWithSystemGC) {
-    // The MS collection is a foreground collection that does all
-    // the parts of a mostly concurrent collection.
-    //
-    // For this collection include the cost of the
-    //  initial mark
-    //  remark
-    //  all concurrent time (scaled down by the
-    //    concurrent_processor_fraction).  Some
-    //    may be zero if the baton was passed before
-    //    it was reached.
-    //    concurrent marking
-    //    sweeping
-    //    resetting
-    //  STW after baton was passed (STW_in_foreground_in_seconds)
-    double STW_in_foreground_in_seconds = _STW_timer.seconds();
-
-    double latest_cms_sum_concurrent_phases_time_secs =
-      concurrent_collection_time();
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("\nCMSAdaptiveSizePolicy::ms_collection_end "
-        "STW_in_foreground_in_seconds %f "
-        "_latest_cms_initial_mark_start_to_end_time_secs %f "
-        "_latest_cms_remark_start_to_end_time_secs %f "
-        "latest_cms_sum_concurrent_phases_time_secs %f "
-        "_latest_cms_ms_marking_start_to_end_time_secs %f "
-        "_latest_cms_ms_end_to_ms_start %f",
-        STW_in_foreground_in_seconds,
-        _latest_cms_initial_mark_start_to_end_time_secs,
-        _latest_cms_remark_start_to_end_time_secs,
-        latest_cms_sum_concurrent_phases_time_secs,
-        _latest_cms_ms_marking_start_to_end_time_secs,
-        _latest_cms_ms_end_to_ms_start);
-    }
-
-    double STW_marking_in_seconds = _latest_cms_initial_mark_start_to_end_time_secs +
-      _latest_cms_remark_start_to_end_time_secs;
-#ifndef CLOCK_GRANULARITY_TOO_LARGE
-    assert(_latest_cms_ms_marking_start_to_end_time_secs == 0.0 ||
-           latest_cms_sum_concurrent_phases_time_secs == 0.0,
-           "marking done twice?");
-#endif
-    double ms_time_in_seconds = STW_marking_in_seconds +
-      STW_in_foreground_in_seconds +
-      _latest_cms_ms_marking_start_to_end_time_secs +
-      scaled_concurrent_collection_time();
-    avg_ms_pause()->sample(ms_time_in_seconds);
-    // Use the STW costs from the initial mark and remark plus
-    // the cost of the concurrent phase to calculate a
-    // collection cost.
-    double cost = 0.0;
-    if ((_latest_cms_ms_end_to_ms_start > 0.0) &&
-        (ms_time_in_seconds > 0.0)) {
-      double interval_in_seconds =
-        _latest_cms_ms_end_to_ms_start + ms_time_in_seconds;
-
-      if (PrintAdaptiveSizePolicy && Verbose) {
-        gclog_or_tty->print_cr("\n ms_time_in_seconds  %f  "
-          "latest_cms_sum_concurrent_phases_time_secs %f  "
-          "interval_in_seconds %f",
-          ms_time_in_seconds,
-          latest_cms_sum_concurrent_phases_time_secs,
-          interval_in_seconds);
-      }
-
-      cost = collection_cost(ms_time_in_seconds, interval_in_seconds);
-
-      _avg_ms_gc_cost->sample(cost);
-      // Average this ms cost into all the other types of gc costs
-      avg_major_gc_cost()->sample(cost);
-
-      // Sample for performance counter
-      _avg_ms_interval->sample(interval_in_seconds);
-    }
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print("cmsAdaptiveSizePolicy::ms_collection_end: "
-        "MS gc cost: %f  average: %f", cost, _avg_ms_gc_cost->average());
-
-      double ms_time_in_ms = ms_time_in_seconds * MILLIUNITS;
-      gclog_or_tty->print_cr("  MS pause: %f (ms) MS period %f (ms)",
-        ms_time_in_ms,
-        _latest_cms_ms_end_to_ms_start * MILLIUNITS);
-    }
-  }
-
-  // Consider putting this code (here to end) into a
-  // method for convenience.
-  clear_internal_time_intervals();
-
-  set_first_after_collection();
-
-  // The concurrent phases keep track of their own mutator interval
-  // with this timer.  This allows the stop-the-world phase to
-  // be included in the mutator time so that the stop-the-world time
-  // is not double counted.  Reset and start it.
-  _concurrent_timer.stop();
-  _concurrent_timer.reset();
-  _concurrent_timer.start();
-
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::clear_internal_time_intervals() {
-  _latest_cms_reset_end_to_initial_mark_start_secs = 0.0;
-  _latest_cms_initial_mark_end_to_remark_start_secs = 0.0;
-  _latest_cms_collection_end_to_collection_start_secs = 0.0;
-  _latest_cms_concurrent_marking_time_secs = 0.0;
-  _latest_cms_concurrent_precleaning_time_secs = 0.0;
-  _latest_cms_concurrent_sweeping_time_secs = 0.0;
-  _latest_cms_msc_end_to_msc_start_time_secs = 0.0;
-  _latest_cms_ms_end_to_ms_start = 0.0;
-  _latest_cms_remark_start_to_end_time_secs = 0.0;
-  _latest_cms_initial_mark_start_to_end_time_secs = 0.0;
-  _latest_cms_ms_marking_start_to_end_time_secs = 0.0;
-}
-
-void CMSAdaptiveSizePolicy::clear_generation_free_space_flags() {
-  AdaptiveSizePolicy::clear_generation_free_space_flags();
-
-  set_change_young_gen_for_maj_pauses(0);
-}
-
-void CMSAdaptiveSizePolicy::concurrent_phases_resume() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_phases_resume()");
-  }
-  _concurrent_timer.start();
-}
-
-double CMSAdaptiveSizePolicy::time_since_major_gc() const {
-  _concurrent_timer.stop();
-  double time_since_cms_gc = _concurrent_timer.seconds();
-  _concurrent_timer.start();
-  _STW_timer.stop();
-  double time_since_STW_gc = _STW_timer.seconds();
-  _STW_timer.start();
-
-  return MIN2(time_since_cms_gc, time_since_STW_gc);
-}
-
-double CMSAdaptiveSizePolicy::major_gc_interval_average_for_decay() const {
-  double cms_interval = _avg_concurrent_interval->average();
-  double msc_interval = _avg_msc_interval->average();
-  double ms_interval = _avg_ms_interval->average();
-
-  return MAX3(cms_interval, msc_interval, ms_interval);
-}
-
-double CMSAdaptiveSizePolicy::cms_gc_cost() const {
-  return avg_major_gc_cost()->average();
-}
-
-void CMSAdaptiveSizePolicy::ms_collection_marking_begin() {
-  _STW_timer.stop();
-  // Start accumulating time for the marking in the STW timer.
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::ms_collection_marking_end(
-    GCCause::Cause gc_cause) {
-  _STW_timer.stop();
-  if (gc_cause != GCCause::_java_lang_system_gc ||
-      UseAdaptiveSizePolicyWithSystemGC) {
-    _latest_cms_ms_marking_start_to_end_time_secs = _STW_timer.seconds();
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::"
-        "ms_collection_marking_end: marking time %f",
-        _latest_cms_ms_marking_start_to_end_time_secs);
-    }
-  }
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-double CMSAdaptiveSizePolicy::gc_cost() const {
-  double cms_gen_cost = cms_gc_cost();
-  double result =  MIN2(1.0, minor_gc_cost() + cms_gen_cost);
-  assert(result >= 0.0, "Both minor and major costs are non-negative");
-  return result;
-}
-
-// Cost of collection (unit-less)
-double CMSAdaptiveSizePolicy::collection_cost(double pause_in_seconds,
-                                              double interval_in_seconds) {
-  // Cost of collection (unit-less)
-  double cost = 0.0;
-  if ((interval_in_seconds > 0.0) &&
-      (pause_in_seconds > 0.0)) {
-    cost =
-      pause_in_seconds / interval_in_seconds;
-  }
-  return cost;
-}
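// Editor's illustrative sketch -- not part of the original file.  The helper
// below mirrors collection_cost() above: the dimensionless cost is simply the
// pause divided by the interval containing it, with non-positive inputs
// treated as zero cost.  Values in main() are hypothetical.
#include <cstdio>

static double example_collection_cost(double pause_in_seconds,
                                       double interval_in_seconds) {
  if (interval_in_seconds > 0.0 && pause_in_seconds > 0.0) {
    return pause_in_seconds / interval_in_seconds;
  }
  return 0.0;
}

int main() {
  // A 50 ms pause inside a 2 s interval costs 2.5% of that interval.
  std::printf("cost = %.3f\n", example_collection_cost(0.050, 2.0));
  return 0;
}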
-
-size_t CMSAdaptiveSizePolicy::adjust_eden_for_pause_time(size_t cur_eden) {
-  size_t change = 0;
-  size_t desired_eden = cur_eden;
-
-  // reduce eden size
-  change = eden_decrement_aligned_down(cur_eden);
-  desired_eden = cur_eden - change;
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::adjust_eden_for_pause_time "
-      "adjusting eden for pause time. "
-      " starting eden size " SIZE_FORMAT
-      " reduced eden size " SIZE_FORMAT
-      " eden delta " SIZE_FORMAT,
-      cur_eden, desired_eden, change);
-  }
-
-  return desired_eden;
-}
-
-size_t CMSAdaptiveSizePolicy::adjust_eden_for_throughput(size_t cur_eden) {
-
-  size_t desired_eden = cur_eden;
-
-  set_change_young_gen_for_throughput(increase_young_gen_for_througput_true);
-
-  size_t change = eden_increment_aligned_up(cur_eden);
-  size_t scaled_change = scale_by_gen_gc_cost(change, minor_gc_cost());
-
-  if (cur_eden + scaled_change > cur_eden) {
-    desired_eden = cur_eden + scaled_change;
-  }
-
-  _young_gen_change_for_minor_throughput++;
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::adjust_eden_for_throughput "
-      "adjusting eden for throughput. "
-      " starting eden size " SIZE_FORMAT
-      " increased eden size " SIZE_FORMAT
-      " eden delta " SIZE_FORMAT,
-      cur_eden, desired_eden, scaled_change);
-  }
-
-  return desired_eden;
-}
-
-size_t CMSAdaptiveSizePolicy::adjust_eden_for_footprint(size_t cur_eden) {
-
-  set_decrease_for_footprint(decrease_young_gen_for_footprint_true);
-
-  size_t change = eden_decrement(cur_eden);
-  size_t desired_eden_size = cur_eden - change;
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::adjust_eden_for_footprint "
-      "adjusting eden for footprint. "
-      " starting eden size " SIZE_FORMAT
-      " reduced eden size " SIZE_FORMAT
-      " eden delta " SIZE_FORMAT,
-      cur_eden, desired_eden_size, change);
-  }
-  return desired_eden_size;
-}
-
-// The eden and promo versions should be combined if possible.
-// They are the same except that the sizes of the decrement
-// and increment are different for eden and promo.
-size_t CMSAdaptiveSizePolicy::eden_decrement_aligned_down(size_t cur_eden) {
-  size_t delta = eden_decrement(cur_eden);
-  return align_size_down(delta, generation_alignment());
-}
-
-size_t CMSAdaptiveSizePolicy::eden_increment_aligned_up(size_t cur_eden) {
-  size_t delta = eden_increment(cur_eden);
-  return align_size_up(delta, generation_alignment());
-}
-
-size_t CMSAdaptiveSizePolicy::promo_decrement_aligned_down(size_t cur_promo) {
-  size_t delta = promo_decrement(cur_promo);
-  return align_size_down(delta, generation_alignment());
-}
-
-size_t CMSAdaptiveSizePolicy::promo_increment_aligned_up(size_t cur_promo) {
-  size_t delta = promo_increment(cur_promo);
-  return align_size_up(delta, generation_alignment());
-}
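// Editor's illustrative sketch -- not part of the original file.  The four
// helpers above round a size delta down (for decrements) or up (for
// increments) to the generation alignment; with a power-of-two alignment that
// is plain bit masking.  example_align_* and the 64 KB alignment are
// hypothetical stand-ins for align_size_down()/align_size_up().
#include <cstdio>
#include <cstddef>

static size_t example_align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);               // alignment must be a power of two
}

static size_t example_align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t alignment = 64 * 1024;           // hypothetical generation alignment
  std::printf("down: %zu  up: %zu\n",
              example_align_down(200000, alignment),   // 196608 = 3 * 64K
              example_align_up(200000, alignment));    // 262144 = 4 * 64K
  return 0;
}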
-
-
-void CMSAdaptiveSizePolicy::compute_eden_space_size(size_t cur_eden,
-                                                    size_t max_eden_size)
-{
-  size_t desired_eden_size = cur_eden;
-  size_t eden_limit = max_eden_size;
-
-  // Printout input
-  if (PrintGC && PrintAdaptiveSizePolicy) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::compute_eden_space_size: "
-      "cur_eden " SIZE_FORMAT,
-      cur_eden);
-  }
-
-  // Used for diagnostics
-  clear_generation_free_space_flags();
-
-  if (_avg_minor_pause->padded_average() > gc_pause_goal_sec()) {
-    if (minor_pause_young_estimator()->decrement_will_decrease()) {
-      // If the minor pause is too long, shrink the young gen.
-      set_change_young_gen_for_min_pauses(
-        decrease_young_gen_for_min_pauses_true);
-      desired_eden_size = adjust_eden_for_pause_time(desired_eden_size);
-    }
-  } else if ((avg_remark_pause()->padded_average() > gc_pause_goal_sec()) ||
-             (avg_initial_pause()->padded_average() > gc_pause_goal_sec())) {
-    // The remark or initial pauses are not meeting the goal.  Should
-    // the generation be shrunk?
-    if (get_and_clear_first_after_collection() &&
-        ((avg_remark_pause()->padded_average() > gc_pause_goal_sec() &&
-          remark_pause_young_estimator()->decrement_will_decrease()) ||
-         (avg_initial_pause()->padded_average() > gc_pause_goal_sec() &&
-          initial_pause_young_estimator()->decrement_will_decrease()))) {
-
-       set_change_young_gen_for_maj_pauses(
-         decrease_young_gen_for_maj_pauses_true);
-
-      // If the remark or initial pause is too long and this is the
-      // first young gen collection after a cms collection, shrink
-      // the young gen.
-      desired_eden_size = adjust_eden_for_pause_time(desired_eden_size);
-    }
-    // If not the first young gen collection after a cms collection,
-    // don't do anything.  In this case an adjustment has already
-    // been made and the results of the adjustment have not yet been
-    // measured.
-  } else if ((minor_gc_cost() >= 0.0) &&
-             (adjusted_mutator_cost() < _throughput_goal)) {
-    desired_eden_size = adjust_eden_for_throughput(desired_eden_size);
-  } else {
-    desired_eden_size = adjust_eden_for_footprint(desired_eden_size);
-  }
-
-  if (PrintGC && PrintAdaptiveSizePolicy) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::compute_eden_space_size limits:"
-      " desired_eden_size: " SIZE_FORMAT
-      " old_eden_size: " SIZE_FORMAT,
-      desired_eden_size, cur_eden);
-  }
-
-  set_eden_size(desired_eden_size);
-}
-
-size_t CMSAdaptiveSizePolicy::adjust_promo_for_pause_time(size_t cur_promo) {
-  size_t change = 0;
-  size_t desired_promo = cur_promo;
-  // Move this test up to the caller, as is done for the
-  // adjust_eden_for_pause_time() call.
-  if ((AdaptiveSizePausePolicy == 0) &&
-      ((avg_remark_pause()->padded_average() > gc_pause_goal_sec()) ||
-      (avg_initial_pause()->padded_average() > gc_pause_goal_sec()))) {
-    set_change_old_gen_for_maj_pauses(decrease_old_gen_for_maj_pauses_true);
-    change = promo_decrement_aligned_down(cur_promo);
-    desired_promo = cur_promo - change;
-  } else if ((AdaptiveSizePausePolicy > 0) &&
-      (((avg_remark_pause()->padded_average() > gc_pause_goal_sec()) &&
-       remark_pause_old_estimator()->decrement_will_decrease()) ||
-      ((avg_initial_pause()->padded_average() > gc_pause_goal_sec()) &&
-       initial_pause_old_estimator()->decrement_will_decrease()))) {
-    set_change_old_gen_for_maj_pauses(decrease_old_gen_for_maj_pauses_true);
-    change = promo_decrement_aligned_down(cur_promo);
-    desired_promo = cur_promo - change;
-  }
-
-  if ((change != 0) && PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::adjust_promo_for_pause_time "
-      "adjusting promo for pause time. "
-      " starting promo size " SIZE_FORMAT
-      " reduced promo size " SIZE_FORMAT
-      " promo delta " SIZE_FORMAT,
-      cur_promo, desired_promo, change);
-  }
-
-  return desired_promo;
-}
-
-// Try to share this with PS.
-size_t CMSAdaptiveSizePolicy::scale_by_gen_gc_cost(size_t base_change,
-                                                  double gen_gc_cost) {
-
-  // Calculate the change to use for the tenured gen.
-  size_t scaled_change = 0;
-  // Can the increment to the generation be scaled?
-  if (gc_cost() >= 0.0 && gen_gc_cost >= 0.0) {
-    double scale_by_ratio = gen_gc_cost / gc_cost();
-    scaled_change =
-      (size_t) (scale_by_ratio * (double) base_change);
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr(
-        "Scaled tenured increment: " SIZE_FORMAT " by %f down to "
-          SIZE_FORMAT,
-        base_change, scale_by_ratio, scaled_change);
-    }
-  } else if (gen_gc_cost >= 0.0) {
-    // Scaling is not going to work.  If the major gc time is
-    // larger than the other GC costs, give it a full increment.
-    if (gen_gc_cost >= (gc_cost() - gen_gc_cost)) {
-      scaled_change = base_change;
-    }
-  } else {
-    // Don't expect to get here but it's ok if it does
-    // in the product build since the delta will be 0
-    // and nothing will change.
-    assert(false, "Unexpected value for gc costs");
-  }
-
-  return scaled_change;
-}
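// Editor's illustrative sketch -- not part of the original file; numbers are
// hypothetical.  scale_by_gen_gc_cost() above scales a proposed size change
// by the generation's share of the total GC cost, so the generation that
// accounts for most of the GC time receives most of the growth.
#include <cstdio>
#include <cstddef>

int main() {
  const double total_gc_cost = 0.10;              // 10% of time in GC overall
  const double gen_gc_cost   = 0.04;              // 4% of time in this generation
  const size_t base_change   = 8 * 1024 * 1024;   // proposed 8 MB increment

  // 0.04 / 0.10 = 0.4, so the generation gets 40% of the proposed increment.
  size_t scaled_change =
      (size_t)((gen_gc_cost / total_gc_cost) * (double)base_change);
  std::printf("scaled increment = %zu bytes\n", scaled_change);
  return 0;
}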
-
-size_t CMSAdaptiveSizePolicy::adjust_promo_for_throughput(size_t cur_promo) {
-
-  size_t desired_promo = cur_promo;
-
-  set_change_old_gen_for_throughput(increase_old_gen_for_throughput_true);
-
-  size_t change = promo_increment_aligned_up(cur_promo);
-  size_t scaled_change = scale_by_gen_gc_cost(change, major_gc_cost());
-
-  if (cur_promo + scaled_change > cur_promo) {
-    desired_promo = cur_promo + scaled_change;
-  }
-
-  _old_gen_change_for_major_throughput++;
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::adjust_promo_for_throughput "
-      "adjusting promo for throughput. "
-      " starting promo size " SIZE_FORMAT
-      " increased promo size " SIZE_FORMAT
-      " promo delta " SIZE_FORMAT,
-      cur_promo, desired_promo, scaled_change);
-  }
-
-  return desired_promo;
-}
-
-size_t CMSAdaptiveSizePolicy::adjust_promo_for_footprint(size_t cur_promo,
-                                                         size_t cur_eden) {
-
-  set_decrease_for_footprint(decrease_young_gen_for_footprint_true);
-
-  size_t change = promo_decrement(cur_promo);
-  size_t desired_promo_size = cur_promo - change;
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::adjust_promo_for_footprint "
-      "adjusting promo for footprint. "
-      " starting promo size " SIZE_FORMAT
-      " reduced promo size " SIZE_FORMAT
-      " promo delta " SIZE_FORMAT,
-      cur_promo, desired_promo_size, change);
-  }
-  return desired_promo_size;
-}
-
-void CMSAdaptiveSizePolicy::compute_tenured_generation_free_space(
-                                size_t cur_tenured_free,
-                                size_t max_tenured_available,
-                                size_t cur_eden) {
-  // This can be bad if the desired value grows/shrinks without
-  // any connection to the real free space.
-  size_t desired_promo_size = promo_size();
-  size_t tenured_limit = max_tenured_available;
-
-  // Printout input
-  if (PrintGC && PrintAdaptiveSizePolicy) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::compute_tenured_generation_free_space: "
-      "cur_tenured_free " SIZE_FORMAT
-      " max_tenured_available " SIZE_FORMAT,
-      cur_tenured_free, max_tenured_available);
-  }
-
-  // Used for diagnostics
-  clear_generation_free_space_flags();
-
-  set_decide_at_full_gc(decide_at_full_gc_true);
-  if (avg_remark_pause()->padded_average() > gc_pause_goal_sec() ||
-      avg_initial_pause()->padded_average() > gc_pause_goal_sec()) {
-    desired_promo_size = adjust_promo_for_pause_time(cur_tenured_free);
-  } else if (avg_minor_pause()->padded_average() > gc_pause_goal_sec()) {
-    // Nothing to do since the minor collections are too large and
-    // this method only deals with the cms generation.
-  } else if ((cms_gc_cost() >= 0.0) &&
-             (adjusted_mutator_cost() < _throughput_goal)) {
-    desired_promo_size = adjust_promo_for_throughput(cur_tenured_free);
-  } else {
-    desired_promo_size = adjust_promo_for_footprint(cur_tenured_free,
-                                                    cur_eden);
-  }
-
-  if (PrintGC && PrintAdaptiveSizePolicy) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::compute_tenured_generation_free_space limits:"
-      " desired_promo_size: " SIZE_FORMAT
-      " old_promo_size: " SIZE_FORMAT,
-      desired_promo_size, cur_tenured_free);
-  }
-
-  set_promo_size(desired_promo_size);
-}
-
-uint CMSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
-                                             bool is_survivor_overflow,
-                                             uint tenuring_threshold,
-                                             size_t survivor_limit) {
-  assert(survivor_limit >= generation_alignment(),
-         "survivor_limit too small");
-  assert((size_t)align_size_down(survivor_limit, generation_alignment())
-         == survivor_limit, "survivor_limit not aligned");
-
-  // Change UsePSAdaptiveSurvivorSizePolicy -> UseAdaptiveSurvivorSizePolicy?
-  if (!UsePSAdaptiveSurvivorSizePolicy ||
-      !young_gen_policy_is_ready()) {
-    return tenuring_threshold;
-  }
-
-  // We'll decide whether to increase or decrease the tenuring
-  // threshold based partly on the newly computed survivor size
-  // (if we hit the maximum limit allowed, we'll always choose to
-  // decrement the threshold).
-  bool incr_tenuring_threshold = false;
-  bool decr_tenuring_threshold = false;
-
-  set_decrement_tenuring_threshold_for_gc_cost(false);
-  set_increment_tenuring_threshold_for_gc_cost(false);
-  set_decrement_tenuring_threshold_for_survivor_limit(false);
-
-  if (!is_survivor_overflow) {
-    // Keep running averages on how much survived
-
-    // We use the tenuring threshold to equalize the cost of major
-    // and minor collections.
-    // ThresholdTolerance is used to indicate how sensitive the
-    // tenuring threshold is to differences in cost between the
-    // collection types.
-
-    // Get the times of interest. This involves a little work, so
-    // we cache the values here.
-    const double major_cost = major_gc_cost();
-    const double minor_cost = minor_gc_cost();
-
-    if (minor_cost > major_cost * _threshold_tolerance_percent) {
-      // Minor times are getting too long;  lower the threshold so
-      // less survives and more is promoted.
-      decr_tenuring_threshold = true;
-      set_decrement_tenuring_threshold_for_gc_cost(true);
-    } else if (major_cost > minor_cost * _threshold_tolerance_percent) {
-      // Major times are too long, so we want less promotion.
-      incr_tenuring_threshold = true;
-      set_increment_tenuring_threshold_for_gc_cost(true);
-    }
-
-  } else {
-    // Survivor space overflow occurred, so promoted and survived are
-    // not accurate. We'll make our best guess by combining survived
-    // and promoted and counting them as survivors.
-    //
-    // We'll lower the tenuring threshold to see if we can correct
-    // things. Also, set the survivor size conservatively. We're
-    // trying to avoid many overflows from occurring if defnew size
-    // is just too small.
-
-    decr_tenuring_threshold = true;
-  }
-
-  // The padded average also maintains a deviation from the average;
-  // we use this to see how good of an estimate we have of what survived.
-  // We're trying to pad the survivor size as little as possible without
-  // overflowing the survivor spaces.
-  size_t target_size = align_size_up((size_t)_avg_survived->padded_average(),
-                                     generation_alignment());
-  target_size = MAX2(target_size, generation_alignment());
-
-  if (target_size > survivor_limit) {
-    // Target size is bigger than we can handle. Let's also reduce
-    // the tenuring threshold.
-    target_size = survivor_limit;
-    decr_tenuring_threshold = true;
-    set_decrement_tenuring_threshold_for_survivor_limit(true);
-  }
-
-  // Finally, increment or decrement the tenuring threshold, as decided above.
-  // We test for decrementing first, as we might have hit the target size
-  // limit.
-  if (decr_tenuring_threshold && !(AlwaysTenure || NeverTenure)) {
-    if (tenuring_threshold > 1) {
-      tenuring_threshold--;
-    }
-  } else if (incr_tenuring_threshold && !(AlwaysTenure || NeverTenure)) {
-    if (tenuring_threshold < MaxTenuringThreshold) {
-      tenuring_threshold++;
-    }
-  }
-
-  // We keep a running average of the amount promoted which is used
-  // to decide when we should collect the old generation (when
-  // the amount of old gen free space is less than what we expect to
-  // promote).
-
-  if (PrintAdaptiveSizePolicy) {
-    // A little more detail if Verbose is on
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    if (Verbose) {
-      gclog_or_tty->print( "  avg_survived: %f"
-                  "  avg_deviation: %f",
-                  _avg_survived->average(),
-                  _avg_survived->deviation());
-    }
-
-    gclog_or_tty->print( "  avg_survived_padded_avg: %f",
-                _avg_survived->padded_average());
-
-    if (Verbose) {
-      gclog_or_tty->print( "  avg_promoted_avg: %f"
-                  "  avg_promoted_dev: %f",
-                  gch->gc_stats(1)->avg_promoted()->average(),
-                  gch->gc_stats(1)->avg_promoted()->deviation());
-    }
-
-    gclog_or_tty->print( "  avg_promoted_padded_avg: %f"
-                "  avg_pretenured_padded_avg: %f"
-                "  tenuring_thresh: %u"
-                "  target_size: " SIZE_FORMAT
-                "  survivor_limit: " SIZE_FORMAT,
-                gch->gc_stats(1)->avg_promoted()->padded_average(),
-                _avg_pretenured->padded_average(),
-                tenuring_threshold, target_size, survivor_limit);
-    gclog_or_tty->cr();
-  }
-
-  set_survivor_size(target_size);
-
-  return tenuring_threshold;
-}
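// Editor's illustrative sketch -- not part of the original file.  It distils
// the tenuring-threshold decision above: when minor collections cost
// relatively more than major ones (beyond a tolerance), lower the threshold
// so more objects are promoted; in the opposite case, raise it.  The helper
// name, the numbers and the 10% tolerance are hypothetical.
#include <cstdio>

static unsigned example_adjust_threshold(unsigned threshold, unsigned max_threshold,
                                         double minor_cost, double major_cost,
                                         double tolerance) {
  if (minor_cost > major_cost * tolerance && threshold > 1) {
    return threshold - 1;          // promote more, survive less
  }
  if (major_cost > minor_cost * tolerance && threshold < max_threshold) {
    return threshold + 1;          // keep objects in the young gen longer
  }
  return threshold;
}

int main() {
  // Minor cost 0.08 vs. major cost 0.05 with 10% tolerance => decrement to 5.
  std::printf("new threshold = %u\n",
              example_adjust_threshold(6, 15, 0.08, 0.05, 1.10));
  return 0;
}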
-
-bool CMSAdaptiveSizePolicy::get_and_clear_first_after_collection() {
-  bool result = _first_after_collection;
-  _first_after_collection = false;
-  return result;
-}
-
-bool CMSAdaptiveSizePolicy::print_adaptive_size_policy_on(
-                                                    outputStream* st) const {
-
-  if (!UseAdaptiveSizePolicy) {
-    return false;
-  }
-
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  Generation* young = gch->get_gen(0);
-  DefNewGeneration* def_new = young->as_DefNewGeneration();
-  return AdaptiveSizePolicy::print_adaptive_size_policy_on(
-                                         st,
-                                         def_new->tenuring_threshold());
-}
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,477 +0,0 @@
-/*
- * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
-
-#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
-#include "runtime/timer.hpp"
-
-// This class keeps statistical information and computes the
-// size of the heap for the concurrent mark sweep collector.
-//
-// Costs for the garbage collector include the cost of
-//   minor collection
-//   concurrent collection
-//      stop-the-world component
-//      concurrent component
-//   major compacting collection
-//      uses decaying cost
-
-// Forward decls
-class elapsedTimer;
-
-class CMSAdaptiveSizePolicy : public AdaptiveSizePolicy {
- friend class CMSGCAdaptivePolicyCounters;
- friend class CMSCollector;
- private:
-
-  // Total number of processors available
-  int _processor_count;
-  // Number of processors used by the concurrent phases of GC
-  // This number is assumed to be the same for all concurrent
-  // phases.
-  int _concurrent_processor_count;
-
-  // Time that the mutators run exclusive of a particular
-  // phase.  For example, the time the mutators run excluding
-  // the time during which the cms collector runs concurrently
-  // with the mutators.
-  //   Between end of most recent cms reset and start of initial mark
-  //   This may be redundant
-  double _latest_cms_reset_end_to_initial_mark_start_secs;
-  //   Between end of the most recent initial mark and start of remark
-  double _latest_cms_initial_mark_end_to_remark_start_secs;
-  //   Between end of most recent collection and start of
-  //   a concurrent collection
-  double _latest_cms_collection_end_to_collection_start_secs;
-  //   Times of the concurrent phases of the most recent
-  //   concurrent collection
-  double _latest_cms_concurrent_marking_time_secs;
-  double _latest_cms_concurrent_precleaning_time_secs;
-  double _latest_cms_concurrent_sweeping_time_secs;
-  //   Between end of most recent STW MSC and start of next STW MSC
-  double _latest_cms_msc_end_to_msc_start_time_secs;
-  //   Between end of most recent MS and start of next MS
-  //   This does not include any time spent during a concurrent
-  // collection.
-  double _latest_cms_ms_end_to_ms_start;
-  //   Between start and end of the initial mark of the most recent
-  // concurrent collection.
-  double _latest_cms_initial_mark_start_to_end_time_secs;
-  //   Between start and end of the remark phase of the most recent
-  // concurrent collection
-  double _latest_cms_remark_start_to_end_time_secs;
-  //   Between start and end of the most recent MS STW marking phase
-  double _latest_cms_ms_marking_start_to_end_time_secs;
-
-  // Pause time timers
-  static elapsedTimer _STW_timer;
-  // Concurrent collection timer.  Used for total of all concurrent phases
-  // during 1 collection cycle.
-  static elapsedTimer _concurrent_timer;
-
-  // When the size of the generation is changed, the size
-  // of the change will be rounded up or down (depending on the
-  // type of change) by this value.
-  size_t _generation_alignment;
-
-  // If this variable is true, the size of the young generation
-  // may be changed in order to reduce the pause(s) of the
-  // collection of the tenured generation in order to meet the
-  // pause time goal.  It is common to change the size of the
-  // tenured generation in order to meet the pause time goal
-  // for the tenured generation.  With the CMS collector for
-  // the tenured generation, the size of the young generation
-  // can have a significant effect on the pause times for collecting the
-  // tenured generation.
-  // This is a duplicate of a variable in PSAdaptiveSizePolicy.  It
-  // is duplicated because it is not clear that it is general enough
-  // to go into AdaptiveSizePolicy.
-  int _change_young_gen_for_maj_pauses;
-
-  // Variable that is set to true after a collection.
-  bool _first_after_collection;
-
-  // Fraction of collections that are of each type
-  double concurrent_fraction() const;
-  double STW_msc_fraction() const;
-  double STW_ms_fraction() const;
-
-  // This call cannot be put into the epilogue as long as some
-  // of the counters can be set during concurrent phases.
-  virtual void clear_generation_free_space_flags();
-
-  void set_first_after_collection() { _first_after_collection = true; }
-
- protected:
-  // Average of the sum of the concurrent times for
-  // one collection in seconds.
-  AdaptiveWeightedAverage* _avg_concurrent_time;
-  // Average time between concurrent collections in seconds.
-  AdaptiveWeightedAverage* _avg_concurrent_interval;
-  // Average cost (dimensionless) of the concurrent part of
-  // a collection.
-  AdaptiveWeightedAverage* _avg_concurrent_gc_cost;
-
-  // Average of the initial pause of a concurrent collection in seconds.
-  AdaptivePaddedAverage* _avg_initial_pause;
-  // Average of the remark pause of a concurrent collection in seconds.
-  AdaptivePaddedAverage* _avg_remark_pause;
-
-  // Average of the stop-the-world (STW) (initial mark + remark)
-  // times in seconds for concurrent collections.
-  AdaptiveWeightedAverage* _avg_cms_STW_time;
-  // Average of the STW collection cost for concurrent collections.
-  AdaptiveWeightedAverage* _avg_cms_STW_gc_cost;
-
-  // Average of the bytes free at the start of the sweep.
-  AdaptiveWeightedAverage* _avg_cms_free_at_sweep;
-  // Average of the bytes free at the end of the collection.
-  AdaptiveWeightedAverage* _avg_cms_free;
-  // Average of the bytes promoted between cms collections.
-  AdaptiveWeightedAverage* _avg_cms_promo;
-
-  // stop-the-world (STW) mark-sweep-compact
-  // Average of the pause time in seconds for STW mark-sweep-compact
-  // collections.
-  AdaptiveWeightedAverage* _avg_msc_pause;
-  // Average of the interval in seconds between STW mark-sweep-compact
-  // collections.
-  AdaptiveWeightedAverage* _avg_msc_interval;
-  // Average of the collection costs for STW mark-sweep-compact
-  // collections.
-  AdaptiveWeightedAverage* _avg_msc_gc_cost;
-
-  // Averages for mark-sweep collections.
-  // The collection may have started as a background collection
-  // that completes in a stop-the-world (STW) collection.
-  // Average of the pause time in seconds for mark-sweep
-  // collections.
-  AdaptiveWeightedAverage* _avg_ms_pause;
-  // Average of the interval in seconds between mark-sweep
-  // collections.
-  AdaptiveWeightedAverage* _avg_ms_interval;
-  // Average of the collection costs for mark-sweep
-  // collections.
-  AdaptiveWeightedAverage* _avg_ms_gc_cost;
-
-  // These variables contain a linear fit of
-  // a generation size as the independent variable
-  // and a pause time as the dependent variable.
-  // For example _remark_pause_old_estimator
-  // is a fit of the old generation size as the
-  // independent variable and the remark pause
-  // as the dependent variable.
-  //   remark pause time vs. cms gen size
-  LinearLeastSquareFit* _remark_pause_old_estimator;
-  //   initial pause time vs. cms gen size
-  LinearLeastSquareFit* _initial_pause_old_estimator;
-  //   remark pause time vs. young gen size
-  LinearLeastSquareFit* _remark_pause_young_estimator;
-  //   initial pause time vs. young gen size
-  LinearLeastSquareFit* _initial_pause_young_estimator;
-
-  // Accessors
-  int processor_count() const { return _processor_count; }
-  int concurrent_processor_count() const { return _concurrent_processor_count; }
-
-  AdaptiveWeightedAverage* avg_concurrent_time() const {
-    return _avg_concurrent_time;
-  }
-
-  AdaptiveWeightedAverage* avg_concurrent_interval() const {
-    return _avg_concurrent_interval;
-  }
-
-  AdaptiveWeightedAverage* avg_concurrent_gc_cost() const {
-    return _avg_concurrent_gc_cost;
-  }
-
-  AdaptiveWeightedAverage* avg_cms_STW_time() const {
-    return _avg_cms_STW_time;
-  }
-
-  AdaptiveWeightedAverage* avg_cms_STW_gc_cost() const {
-    return _avg_cms_STW_gc_cost;
-  }
-
-  AdaptivePaddedAverage* avg_initial_pause() const {
-    return _avg_initial_pause;
-  }
-
-  AdaptivePaddedAverage* avg_remark_pause() const {
-    return _avg_remark_pause;
-  }
-
-  AdaptiveWeightedAverage* avg_cms_free() const {
-    return _avg_cms_free;
-  }
-
-  AdaptiveWeightedAverage* avg_cms_free_at_sweep() const {
-    return _avg_cms_free_at_sweep;
-  }
-
-  AdaptiveWeightedAverage* avg_msc_pause() const {
-    return _avg_msc_pause;
-  }
-
-  AdaptiveWeightedAverage* avg_msc_interval() const {
-    return _avg_msc_interval;
-  }
-
-  AdaptiveWeightedAverage* avg_msc_gc_cost() const {
-    return _avg_msc_gc_cost;
-  }
-
-  AdaptiveWeightedAverage* avg_ms_pause() const {
-    return _avg_ms_pause;
-  }
-
-  AdaptiveWeightedAverage* avg_ms_interval() const {
-    return _avg_ms_interval;
-  }
-
-  AdaptiveWeightedAverage* avg_ms_gc_cost() const {
-    return _avg_ms_gc_cost;
-  }
-
-  LinearLeastSquareFit* remark_pause_old_estimator() {
-    return _remark_pause_old_estimator;
-  }
-  LinearLeastSquareFit* initial_pause_old_estimator() {
-    return _initial_pause_old_estimator;
-  }
-  LinearLeastSquareFit* remark_pause_young_estimator() {
-    return _remark_pause_young_estimator;
-  }
-  LinearLeastSquareFit* initial_pause_young_estimator() {
-    return _initial_pause_young_estimator;
-  }
-
-  // These *slope() methods return the slope
-  // m for the linear fit of an independent
-  // variable vs. a dependent variable.  For
-  // example
-  //  remark_pause = m * old_generation_size + c
-  // These may be used to determine if an
-  // adjustment should be made to achieve a goal.
-  // For example, if remark_pause_old_slope() is
-  // positive, a reduction of the old generation
-  // size has on average resulted in the reduction
-  // of the remark pause.
-  float remark_pause_old_slope() {
-    return _remark_pause_old_estimator->slope();
-  }
-
-  float initial_pause_old_slope() {
-    return _initial_pause_old_estimator->slope();
-  }
-
-  float remark_pause_young_slope() {
-    return _remark_pause_young_estimator->slope();
-  }
-
-  float initial_pause_young_slope() {
-    return _initial_pause_young_estimator->slope();
-  }
-
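// Editor's illustrative sketch -- not part of the original file.  Each
// estimator fits pause_time = m * generation_size + c by least squares, and
// the slope accessors above expose m; a positive slope suggests that shrinking
// the generation would, on average, shorten the pause.  example_slope() and
// the sample data are hypothetical, not the LinearLeastSquareFit implementation.
#include <cstdio>

static double example_slope(const double* x, const double* y, int n) {
  double sx = 0.0, sy = 0.0, sxx = 0.0, sxy = 0.0;
  for (int i = 0; i < n; i++) {
    sx += x[i]; sy += y[i]; sxx += x[i] * x[i]; sxy += x[i] * y[i];
  }
  return (n * sxy - sx * sy) / (n * sxx - sx * sx);
}

int main() {
  // Old generation size in MB vs. remark pause in ms (made-up data points).
  const double size_mb[]  = { 256.0, 512.0, 768.0, 1024.0 };
  const double pause_ms[] = {  20.0,  35.0,  52.0,   66.0 };
  std::printf("slope = %.4f ms/MB\n", example_slope(size_mb, pause_ms, 4));
  return 0;
}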
-  // Update estimators
-  void update_minor_pause_old_estimator(double minor_pause_in_ms);
-
-  // Fraction of processors used by the concurrent phases.
-  double concurrent_processor_fraction();
-
-  // Returns the total times for the concurrent part of the
-  // latest collection in seconds.
-  double concurrent_collection_time();
-
-  // Return the total times for the concurrent part of the
-  // latest collection in seconds where the times of the various
-  // concurrent phases are scaled by the processor fraction used
-  // during the phase.
-  double scaled_concurrent_collection_time();
-
-  // Dimensionless concurrent GC cost for all the concurrent phases.
-  double concurrent_collection_cost(double interval_in_seconds);
-
-  // Dimensionless GC cost
-  double collection_cost(double pause_in_seconds, double interval_in_seconds);
-
-  virtual GCPolicyKind kind() const { return _gc_cms_adaptive_size_policy; }
-
-  virtual double time_since_major_gc() const;
-
-  // This returns the maximum average for the concurrent, ms, and
-  // msc collections.  This is meant to be used for the calculation
-  // of the decayed major gc cost and is not in general the
-  // average of all the different types of major collections.
-  virtual double major_gc_interval_average_for_decay() const;
-
- public:
-  CMSAdaptiveSizePolicy(size_t init_eden_size,
-                        size_t init_promo_size,
-                        size_t init_survivor_size,
-                        double max_gc_minor_pause_sec,
-                        double max_gc_pause_sec,
-                        uint gc_cost_ratio);
-
-  // The timers for the stop-the-world phases measure a total
-  // stop-the-world time.  The timer is started and stopped
-  // for each phase but is only reset after the final checkpoint.
-  void checkpoint_roots_initial_begin();
-  void checkpoint_roots_initial_end(GCCause::Cause gc_cause);
-  void checkpoint_roots_final_begin();
-  void checkpoint_roots_final_end(GCCause::Cause gc_cause);
-
-  // Methods for gathering information about the
-  // concurrent marking phase of the collection.
-  // Records the mutator times and
-  // resets the concurrent timer.
-  void concurrent_marking_begin();
-  // Resets concurrent phase timer in the begin methods and
-  // saves the time for a phase in the end methods.
-  void concurrent_marking_end();
-  void concurrent_sweeping_begin();
-  void concurrent_sweeping_end();
-  // Similar to the above (e.g., concurrent_marking_end()) and
-  // is used for both the precleaning and abortable precleaning
-  // phases.
-  void concurrent_precleaning_begin();
-  void concurrent_precleaning_end();
-  // Stops the concurrent phases time.  Gathers
-  // information and resets the timer.
-  void concurrent_phases_end(GCCause::Cause gc_cause,
-                              size_t cur_eden,
-                              size_t cur_promo);
-
-  // Methods for gathering information about STW Mark-Sweep-Compact
-  void msc_collection_begin();
-  void msc_collection_end(GCCause::Cause gc_cause);
-
-  // Methods for gathering information about Mark-Sweep done
-  // in the foreground.
-  void ms_collection_begin();
-  void ms_collection_end(GCCause::Cause gc_cause);
-
-  // Cost for a mark-sweep tenured gen collection done in the foreground
-  double ms_gc_cost() const {
-    return MAX2(0.0F, _avg_ms_gc_cost->average());
-  }
-
-  // Cost of collecting the tenured generation.  Includes
-  // concurrent collection and STW collection costs
-  double cms_gc_cost() const;
-
-  // Cost of STW mark-sweep-compact tenured gen collection.
-  double msc_gc_cost() const {
-    return MAX2(0.0F, _avg_msc_gc_cost->average());
-  }
-
-  // Cost of a compacting collection: minor GC cost plus MSC GC cost, capped at 1.0.
-  double compacting_gc_cost() const {
-    double result = MIN2(1.0, minor_gc_cost() + msc_gc_cost());
-    assert(result >= 0.0, "Both minor and major costs are non-negative");
-    return result;
-  }
-
-   // Restarts the concurrent phases timer.
-   void concurrent_phases_resume();
-
-   // Time beginning and end of the marking phase for
-   // a synchronous MS collection.  A MS collection
-   // that finishes in the foreground can have started
-   // in the background.  These methods capture the
-   // completion of the marking (after the initial
-   // marking) that is done in the foreground.
-   void ms_collection_marking_begin();
-   void ms_collection_marking_end(GCCause::Cause gc_cause);
-
-   static elapsedTimer* concurrent_timer_ptr() {
-     return &_concurrent_timer;
-   }
-
-  AdaptiveWeightedAverage* avg_cms_promo() const {
-    return _avg_cms_promo;
-  }
-
-  int change_young_gen_for_maj_pauses() {
-    return _change_young_gen_for_maj_pauses;
-  }
-  void set_change_young_gen_for_maj_pauses(int v) {
-    _change_young_gen_for_maj_pauses = v;
-  }
-
-  void clear_internal_time_intervals();
-
-
-  // Either calculated_promo_size_in_bytes() or promo_size()
-  // should be deleted.
-  size_t promo_size() { return _promo_size; }
-  void set_promo_size(size_t v) { _promo_size = v; }
-
-  // Cost of GC for all types of collections.
-  virtual double gc_cost() const;
-
-  size_t generation_alignment() { return _generation_alignment; }
-
-  virtual void compute_eden_space_size(size_t cur_eden,
-                                       size_t max_eden_size);
-  // Calculates new survivor space size;  returns a new tenuring threshold
-  // value. Stores new survivor size in _survivor_size.
-  virtual uint compute_survivor_space_size_and_threshold(
-                                                bool   is_survivor_overflow,
-                                                uint   tenuring_threshold,
-                                                size_t survivor_limit);
-
-  virtual void compute_tenured_generation_free_space(size_t cur_tenured_free,
-                                           size_t max_tenured_available,
-                                           size_t cur_eden);
-
-  size_t eden_decrement_aligned_down(size_t cur_eden);
-  size_t eden_increment_aligned_up(size_t cur_eden);
-
-  size_t adjust_eden_for_pause_time(size_t cur_eden);
-  size_t adjust_eden_for_throughput(size_t cur_eden);
-  size_t adjust_eden_for_footprint(size_t cur_eden);
-
-  size_t promo_decrement_aligned_down(size_t cur_promo);
-  size_t promo_increment_aligned_up(size_t cur_promo);
-
-  size_t adjust_promo_for_pause_time(size_t cur_promo);
-  size_t adjust_promo_for_throughput(size_t cur_promo);
-  size_t adjust_promo_for_footprint(size_t cur_promo, size_t cur_eden);
-
-  // Scale down the input size by the ratio of the cost to collect the
-  // generation to the total GC cost.
-  size_t scale_by_gen_gc_cost(size_t base_change, double gen_gc_cost);
-
-  // Return the value and clear it.
-  bool get_and_clear_first_after_collection();
-
-  // Printing support
-  virtual bool print_adaptive_size_policy_on(outputStream* st) const;
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
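The removed header above leans on LinearLeastSquareFit estimators whose slope m (as in remark_pause = m * old_generation_size + c) tells the policy whether shrinking a generation has, on average, shrunk a pause. As a reading aid only, separate from the patch content, here is a minimal C++ sketch of such a slope computation and the sign test described in the comments; the class name and sample values are invented for illustration and are not the HotSpot LinearLeastSquareFit API.

// Illustrative sketch only -- not part of this changeset.
#include <cstddef>
#include <iostream>

class SimpleLinearFit {
  double _sum_x = 0.0, _sum_y = 0.0, _sum_xy = 0.0, _sum_xx = 0.0;
  std::size_t _n = 0;
 public:
  void sample(double x, double y) {
    _sum_x += x; _sum_y += y; _sum_xy += x * y; _sum_xx += x * x;
    ++_n;
  }
  // Slope m of the least-squares fit y = m * x + c (0.0 until defined).
  double slope() const {
    if (_n < 2) return 0.0;
    double denom = _n * _sum_xx - _sum_x * _sum_x;
    return denom == 0.0 ? 0.0 : (_n * _sum_xy - _sum_x * _sum_y) / denom;
  }
};

int main() {
  // Pretend samples of (old generation size in MB, remark pause in ms).
  SimpleLinearFit remark_pause_old_fit;
  remark_pause_old_fit.sample(512, 40);
  remark_pause_old_fit.sample(768, 55);
  remark_pause_old_fit.sample(1024, 72);

  // A positive slope suggests that reducing the old generation size has,
  // on average, reduced the remark pause -- the decision rule the removed
  // comments describe.
  if (remark_pause_old_fit.slope() > 0.0) {
    std::cout << "consider shrinking the old generation" << std::endl;
  }
  return 0;
}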
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -23,9 +23,8 @@
  */
 
 #include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
+#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #include "gc_implementation/shared/gcPolicyCounters.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
@@ -57,25 +56,12 @@
   if (_generations == NULL)
     vm_exit_during_initialization("Unable to allocate gen spec");
 
-  if (UseParNewGC) {
-    if (UseAdaptiveSizePolicy) {
-      _generations[0] = new GenerationSpec(Generation::ASParNew,
-                                           _initial_young_size, _max_young_size);
-    } else {
-      _generations[0] = new GenerationSpec(Generation::ParNew,
-                                           _initial_young_size, _max_young_size);
-    }
-  } else {
-    _generations[0] = new GenerationSpec(Generation::DefNew,
-                                         _initial_young_size, _max_young_size);
-  }
-  if (UseAdaptiveSizePolicy) {
-    _generations[1] = new GenerationSpec(Generation::ASConcurrentMarkSweep,
-                                         _initial_old_size, _max_old_size);
-  } else {
-    _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
-                                         _initial_old_size, _max_old_size);
-  }
+  Generation::Name yg_name =
+    UseParNewGC ? Generation::ParNew : Generation::DefNew;
+  _generations[0] = new GenerationSpec(yg_name, _initial_young_size,
+                                       _max_young_size);
+  _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
+                                       _initial_old_size, _max_old_size);
 
   if (_generations[0] == NULL || _generations[1] == NULL) {
     vm_exit_during_initialization("Unable to allocate gen spec");
@@ -85,14 +71,12 @@
 void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
-  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
-  _size_policy = new CMSAdaptiveSizePolicy(init_eden_size,
-                                           init_promo_size,
-                                           init_survivor_size,
-                                           max_gc_minor_pause_sec,
-                                           max_gc_pause_sec,
-                                           GCTimeRatio);
+  _size_policy = new AdaptiveSizePolicy(init_eden_size,
+                                        init_promo_size,
+                                        init_survivor_size,
+                                        max_gc_pause_sec,
+                                        GCTimeRatio);
 }
 
 void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
@@ -110,22 +94,3 @@
 {
   return CMSIncrementalMode;
 }
-
-
-//
-// ASConcurrentMarkSweepPolicy methods
-//
-
-void ASConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
-
-  assert(size_policy() != NULL, "A size policy is required");
-  // initialize the policy counters - 2 collectors, 3 generations
-  if (UseParNewGC) {
-    _gc_policy_counters = new CMSGCAdaptivePolicyCounters("ParNew:CMS", 2, 3,
-      size_policy());
-  }
-  else {
-    _gc_policy_counters = new CMSGCAdaptivePolicyCounters("Copy:CMS", 2, 3,
-      size_policy());
-  }
-}
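The initialize_size_policy() hunk above feeds the plain AdaptiveSizePolicy a pause goal derived from MaxGCPauseMillis and the cost ratio from GCTimeRatio. As a reading aid only, a small stand-alone sketch of those flag-to-goal conversions follows; the millis-to-seconds conversion mirrors the hunk, while the throughput-goal formula is an assumption about how a GCTimeRatio-style knob is commonly interpreted, not a quote of the HotSpot implementation.

// Illustrative sketch only -- not part of this changeset.
#include <iostream>

int main() {
  const double MaxGCPauseMillis = 200.0;  // example flag values
  const double GCTimeRatio      = 99.0;

  double max_gc_pause_sec = MaxGCPauseMillis / 1000.0;        // as in the hunk
  double throughput_goal  = 1.0 - 1.0 / (1.0 + GCTimeRatio);  // assumed interpretation

  std::cout << "pause goal: " << max_gc_pause_sec << " s" << std::endl;  // 0.2 s
  std::cout << "throughput goal: " << throughput_goal << std::endl;      // 0.99
  return 0;
}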
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -47,19 +47,4 @@
   virtual bool has_soft_ended_eden();
 };
 
-class ASConcurrentMarkSweepPolicy : public ConcurrentMarkSweepPolicy {
- public:
-
-  // Initialize the jstat counters.  This method requires a
-  // size policy.  The size policy is expected to be created
-  // after the generations are fully initialized so the
-  // initialization of the counters needs to be done after
-  // the initialization of the generations.
-  void initialize_gc_policy_counters();
-
-  virtual CollectorPolicy::Name kind() {
-    return CollectorPolicy::ASConcurrentMarkSweepPolicyKind;
-  }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSCOLLECTORPOLICY_HPP
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,303 +0,0 @@
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
-#include "memory/resourceArea.hpp"
-
-CMSGCAdaptivePolicyCounters::CMSGCAdaptivePolicyCounters(const char* name_arg,
-                                        int collectors,
-                                        int generations,
-                                        AdaptiveSizePolicy* size_policy_arg)
-        : GCAdaptivePolicyCounters(name_arg,
-                                   collectors,
-                                   generations,
-                                   size_policy_arg) {
-  if (UsePerfData) {
-    EXCEPTION_MARK;
-    ResourceMark rm;
-
-    const char* cname =
-      PerfDataManager::counter_name(name_space(), "cmsCapacity");
-    _cms_capacity_counter = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Bytes, (jlong) OldSize, CHECK);
-#ifdef NOT_PRODUCT
-    cname =
-      PerfDataManager::counter_name(name_space(), "initialPause");
-    _initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Ticks,
-      (jlong) cms_size_policy()->avg_initial_pause()->last_sample(),
-      CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "remarkPause");
-    _remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Ticks,
-      (jlong) cms_size_policy()->avg_remark_pause()->last_sample(),
-      CHECK);
-#endif
-    cname =
-      PerfDataManager::counter_name(name_space(), "avgInitialPause");
-    _avg_initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Ticks,
-      (jlong) cms_size_policy()->avg_initial_pause()->average(),
-      CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgRemarkPause");
-    _avg_remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
-    PerfData::U_Ticks,
-      (jlong) cms_size_policy()->avg_remark_pause()->average(),
-      CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgSTWGcCost");
-    _avg_cms_STW_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-      (jlong) cms_size_policy()->avg_cms_STW_gc_cost()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgSTWTime");
-    _avg_cms_STW_time_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-      (jlong) cms_size_policy()->avg_cms_STW_time()->average(),
-        CHECK);
-
-
-    cname = PerfDataManager::counter_name(name_space(), "avgConcurrentTime");
-    _avg_concurrent_time_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_concurrent_time()->average(),
-        CHECK);
-
-    cname =
-      PerfDataManager::counter_name(name_space(), "avgConcurrentInterval");
-    _avg_concurrent_interval_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_concurrent_interval()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgConcurrentGcCost");
-    _avg_concurrent_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_concurrent_gc_cost()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgCMSFreeAtSweep");
-    _avg_cms_free_at_sweep_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_cms_free_at_sweep()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgCMSFree");
-    _avg_cms_free_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_cms_free()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgCMSPromo");
-    _avg_cms_promo_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_cms_promo()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgMscPause");
-    _avg_msc_pause_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_msc_pause()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgMscInterval");
-    _avg_msc_interval_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_msc_interval()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "mscGcCost");
-    _msc_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_msc_gc_cost()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgMsPause");
-    _avg_ms_pause_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_ms_pause()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgMsInterval");
-    _avg_ms_interval_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_ms_interval()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "msGcCost");
-    _ms_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_ms_gc_cost()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "majorGcCost");
-    _major_gc_cost_counter = PerfDataManager::create_variable(SUN_GC, cname,
-       PerfData::U_Ticks, (jlong) cms_size_policy()->cms_gc_cost(), CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgPromotedAvg");
-    _promoted_avg_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        cms_size_policy()->calculated_promo_size_in_bytes(), CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgPromotedDev");
-    _promoted_avg_dev_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        (jlong) 0 , CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgPromotedPaddedAvg");
-    _promoted_padded_avg_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        cms_size_policy()->calculated_promo_size_in_bytes(), CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(),
-      "changeYoungGenForMajPauses");
-    _change_young_gen_for_maj_pauses_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Events,
-        (jlong)0, CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "remarkPauseOldSlope");
-    _remark_pause_old_slope_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        (jlong) cms_size_policy()->remark_pause_old_slope(), CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "initialPauseOldSlope");
-    _initial_pause_old_slope_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        (jlong) cms_size_policy()->initial_pause_old_slope(), CHECK);
-
-    cname =
-      PerfDataManager::counter_name(name_space(), "remarkPauseYoungSlope") ;
-    _remark_pause_young_slope_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        (jlong) cms_size_policy()->remark_pause_young_slope(), CHECK);
-
-    cname =
-      PerfDataManager::counter_name(name_space(), "initialPauseYoungSlope");
-    _initial_pause_young_slope_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        (jlong) cms_size_policy()->initial_pause_young_slope(), CHECK);
-
-
-  }
-  assert(size_policy()->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-}
-
-void CMSGCAdaptivePolicyCounters::update_counters() {
-  if (UsePerfData) {
-    GCAdaptivePolicyCounters::update_counters_from_policy();
-    update_counters_from_policy();
-  }
-}
-
-void CMSGCAdaptivePolicyCounters::update_counters(CMSGCStats* gc_stats) {
-  if (UsePerfData) {
-    update_counters();
-    update_promoted((size_t) gc_stats->avg_promoted()->last_sample());
-    update_avg_promoted_avg(gc_stats);
-    update_avg_promoted_dev(gc_stats);
-    update_avg_promoted_padded_avg(gc_stats);
-  }
-}
-
-void CMSGCAdaptivePolicyCounters::update_counters_from_policy() {
-  if (UsePerfData && (cms_size_policy() != NULL)) {
-
-    GCAdaptivePolicyCounters::update_counters_from_policy();
-
-    update_major_gc_cost_counter();
-    update_mutator_cost_counter();
-
-    update_eden_size();
-    update_promo_size();
-
-    // If these updates from the last_sample() work,
-    // revise the update methods for these counters
-    // (both here and in PS).
-    update_survived((size_t) cms_size_policy()->avg_survived()->last_sample());
-
-    update_avg_concurrent_time_counter();
-    update_avg_concurrent_interval_counter();
-    update_avg_concurrent_gc_cost_counter();
-#ifdef NOT_PRODUCT
-    update_initial_pause_counter();
-    update_remark_pause_counter();
-#endif
-    update_avg_initial_pause_counter();
-    update_avg_remark_pause_counter();
-
-    update_avg_cms_STW_time_counter();
-    update_avg_cms_STW_gc_cost_counter();
-
-    update_avg_cms_free_counter();
-    update_avg_cms_free_at_sweep_counter();
-    update_avg_cms_promo_counter();
-
-    update_avg_msc_pause_counter();
-    update_avg_msc_interval_counter();
-    update_msc_gc_cost_counter();
-
-    update_avg_ms_pause_counter();
-    update_avg_ms_interval_counter();
-    update_ms_gc_cost_counter();
-
-    update_avg_old_live_counter();
-
-    update_survivor_size_counters();
-    update_avg_survived_avg_counters();
-    update_avg_survived_dev_counters();
-
-    update_decrement_tenuring_threshold_for_gc_cost();
-    update_increment_tenuring_threshold_for_gc_cost();
-    update_decrement_tenuring_threshold_for_survivor_limit();
-
-    update_change_young_gen_for_maj_pauses();
-
-    update_major_collection_slope_counter();
-    update_remark_pause_old_slope_counter();
-    update_initial_pause_old_slope_counter();
-    update_remark_pause_young_slope_counter();
-    update_initial_pause_young_slope_counter();
-
-    update_decide_at_full_gc_counter();
-  }
-}
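The removed counter code above publishes average(), deviation() and padded_average() values for promoted bytes. For readers unfamiliar with those statistics, here is a minimal, self-contained sketch of an exponentially weighted average with a "padded" variant (average plus a multiple of the average deviation); it is illustrative only, with invented names and weights, and is not the HotSpot AdaptiveWeightedAverage/AdaptivePaddedAverage implementation.

// Illustrative sketch only -- not part of this changeset.
#include <cmath>
#include <initializer_list>
#include <iostream>

class PaddedAverageSketch {
  double _weight;   // fraction of each new sample folded into the average
  double _padding;  // number of deviations added for the padded average
  double _avg = 0.0;
  double _dev = 0.0;
 public:
  PaddedAverageSketch(double weight, double padding)
      : _weight(weight), _padding(padding) {}
  void sample(double value) {
    _avg = (1.0 - _weight) * _avg + _weight * value;
    _dev = (1.0 - _weight) * _dev + _weight * std::fabs(value - _avg);
  }
  double average() const        { return _avg; }
  double deviation() const      { return _dev; }
  double padded_average() const { return _avg + _padding * _dev; }
};

int main() {
  // Pretend samples of bytes promoted per minor collection.
  PaddedAverageSketch promoted(0.25, 3.0);
  for (double bytes : {1.0e6, 1.2e6, 0.9e6, 2.0e6}) {
    promoted.sample(bytes);
  }
  std::cout << promoted.average() << " " << promoted.padded_average() << std::endl;
  return 0;
}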
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,308 +0,0 @@
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
-
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
-#include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
-#include "gc_implementation/shared/gcStats.hpp"
-#include "runtime/perfData.hpp"
-
-// CMSGCAdaptivePolicyCounters is a holder class for performance counters
-// that track the data and decisions for the ergonomics policy for the
-// concurrent mark sweep collector
-
-class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
-  friend class VMStructs;
-
- private:
-
-  // Capacity of tenured generation recorded at the end of
-  // any collection.
-  PerfVariable* _cms_capacity_counter; // Make this common with PS _old_capacity
-
-  // Average stop-the-world pause time for both initial and
-  // remark pauses sampled at the end of the checkpointRootsFinalWork.
-  PerfVariable* _avg_cms_STW_time_counter;
-  // Average stop-the-world (STW) GC cost for the STW pause time
-  // _avg_cms_STW_time_counter.
-  PerfVariable* _avg_cms_STW_gc_cost_counter;
-
-#ifdef NOT_PRODUCT
-  // These are useful to see how the most recent values of these
-  // counters compare to their respective averages but
-  // do not control behavior.
-  PerfVariable* _initial_pause_counter;
-  PerfVariable* _remark_pause_counter;
-#endif
-
-  // Average of the initial marking pause for a concurrent collection.
-  PerfVariable* _avg_initial_pause_counter;
-  // Average of the remark pause for a concurrent collection.
-  PerfVariable* _avg_remark_pause_counter;
-
-  // Average for the sum of all the concurrent times per collection.
-  PerfVariable* _avg_concurrent_time_counter;
-  // Average for the time between the most recent end of a
-  // concurrent collection and the beginning of the next
-  // concurrent collection.
-  PerfVariable* _avg_concurrent_interval_counter;
-  // Average of the concurrent GC costs based on _avg_concurrent_time_counter
-  // and _avg_concurrent_interval_counter.
-  PerfVariable* _avg_concurrent_gc_cost_counter;
-
-  // Average of the free space in the tenured generation at the
-  // end of the sweep of the tenured generation.
-  PerfVariable* _avg_cms_free_counter;
-  // Average of the free space in the tenured generation at the
-  // start of the sweep of the tenured generation.
-  PerfVariable* _avg_cms_free_at_sweep_counter;
-  // Average of the free space in the tenured generation after
-  // any resizing of the tenured generation at the end
-  // of a collection of the tenured generation.
-  PerfVariable* _avg_cms_promo_counter;
-
-  // Average of  the mark-sweep-compact (MSC) pause time for a collection
-  // of the tenured generation.
-  PerfVariable* _avg_msc_pause_counter;
-  // Average for the time between the most recent end of a
-  // MSC collection and the beginning of the next MSC collection.
-  PerfVariable* _avg_msc_interval_counter;
-  // Average for the GC cost of a MSC collection based on
-  // _avg_msc_pause_counter and _avg_msc_interval_counter.
-  PerfVariable* _msc_gc_cost_counter;
-
-  // Average of  the mark-sweep (MS) pause time for a collection
-  // of the tenured generation.
-  PerfVariable* _avg_ms_pause_counter;
-  // Average for the time between the most recent end of a
-  // MS collection and the beginning of the next MS collection.
-  PerfVariable* _avg_ms_interval_counter;
-  // Average for the GC cost of a MS collection based on
-  // _avg_ms_pause_counter and _avg_ms_interval_counter.
-  PerfVariable* _ms_gc_cost_counter;
-
-  // Average of the bytes promoted per minor collection.
-  PerfVariable* _promoted_avg_counter;
-  // Average of the deviation of the promoted average.
-  PerfVariable* _promoted_avg_dev_counter;
-  // Padded average of the bytes promoted per minor collection.
-  PerfVariable* _promoted_padded_avg_counter;
-
-  // See description of the _change_young_gen_for_maj_pauses
-  // variable in cmsAdaptiveSizePolicy.hpp.
-  PerfVariable* _change_young_gen_for_maj_pauses_counter;
-
-  // See descriptions of _remark_pause_old_slope, _initial_pause_old_slope,
-  // etc. variables in cmsAdaptiveSizePolicy.hpp.
-  PerfVariable* _remark_pause_old_slope_counter;
-  PerfVariable* _initial_pause_old_slope_counter;
-  PerfVariable* _remark_pause_young_slope_counter;
-  PerfVariable* _initial_pause_young_slope_counter;
-
-  CMSAdaptiveSizePolicy* cms_size_policy() {
-    assert(_size_policy->kind() ==
-      AdaptiveSizePolicy::_gc_cms_adaptive_size_policy,
-      "Wrong size policy");
-    return (CMSAdaptiveSizePolicy*)_size_policy;
-  }
-
-  inline void update_avg_cms_STW_time_counter() {
-    _avg_cms_STW_time_counter->set_value(
-      (jlong) (cms_size_policy()->avg_cms_STW_time()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_avg_cms_STW_gc_cost_counter() {
-    _avg_cms_STW_gc_cost_counter->set_value(
-      (jlong) (cms_size_policy()->avg_cms_STW_gc_cost()->average() * 100.0));
-  }
-
-  inline void update_avg_initial_pause_counter() {
-    _avg_initial_pause_counter->set_value(
-      (jlong) (cms_size_policy()->avg_initial_pause()->average() *
-      (double) MILLIUNITS));
-  }
-#ifdef NOT_PRODUCT
-  inline void update_avg_remark_pause_counter() {
-    _avg_remark_pause_counter->set_value(
-      (jlong) (cms_size_policy()-> avg_remark_pause()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_initial_pause_counter() {
-    _initial_pause_counter->set_value(
-      (jlong) (cms_size_policy()->avg_initial_pause()->average() *
-      (double) MILLIUNITS));
-  }
-#endif
-  inline void update_remark_pause_counter() {
-    _remark_pause_counter->set_value(
-      (jlong) (cms_size_policy()-> avg_remark_pause()->last_sample() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_avg_concurrent_time_counter() {
-    _avg_concurrent_time_counter->set_value(
-      (jlong) (cms_size_policy()->avg_concurrent_time()->last_sample() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_avg_concurrent_interval_counter() {
-    _avg_concurrent_interval_counter->set_value(
-      (jlong) (cms_size_policy()->avg_concurrent_interval()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_avg_concurrent_gc_cost_counter() {
-    _avg_concurrent_gc_cost_counter->set_value(
-      (jlong) (cms_size_policy()->avg_concurrent_gc_cost()->average() * 100.0));
-  }
-
-  inline void update_avg_cms_free_counter() {
-    _avg_cms_free_counter->set_value(
-      (jlong) cms_size_policy()->avg_cms_free()->average());
-  }
-
-  inline void update_avg_cms_free_at_sweep_counter() {
-    _avg_cms_free_at_sweep_counter->set_value(
-      (jlong) cms_size_policy()->avg_cms_free_at_sweep()->average());
-  }
-
-  inline void update_avg_cms_promo_counter() {
-    _avg_cms_promo_counter->set_value(
-      (jlong) cms_size_policy()->avg_cms_promo()->average());
-  }
-
-  inline void update_avg_old_live_counter() {
-    _avg_old_live_counter->set_value(
-      (jlong)(cms_size_policy()->avg_old_live()->average())
-    );
-  }
-
-  inline void update_avg_msc_pause_counter() {
-    _avg_msc_pause_counter->set_value(
-      (jlong) (cms_size_policy()->avg_msc_pause()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_avg_msc_interval_counter() {
-    _avg_msc_interval_counter->set_value(
-      (jlong) (cms_size_policy()->avg_msc_interval()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_msc_gc_cost_counter() {
-    _msc_gc_cost_counter->set_value(
-      (jlong) (cms_size_policy()->avg_msc_gc_cost()->average() * 100.0));
-  }
-
-  inline void update_avg_ms_pause_counter() {
-    _avg_ms_pause_counter->set_value(
-      (jlong) (cms_size_policy()->avg_ms_pause()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_avg_ms_interval_counter() {
-    _avg_ms_interval_counter->set_value(
-      (jlong) (cms_size_policy()->avg_ms_interval()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_ms_gc_cost_counter() {
-    _ms_gc_cost_counter->set_value(
-      (jlong) (cms_size_policy()->avg_ms_gc_cost()->average() * 100.0));
-  }
-
-  inline void update_major_gc_cost_counter() {
-    _major_gc_cost_counter->set_value(
-      (jlong)(cms_size_policy()->cms_gc_cost() * 100.0)
-    );
-  }
-  inline void update_mutator_cost_counter() {
-    _mutator_cost_counter->set_value(
-      (jlong)(cms_size_policy()->mutator_cost() * 100.0)
-    );
-  }
-
-  inline void update_avg_promoted_avg(CMSGCStats* gc_stats) {
-    _promoted_avg_counter->set_value(
-      (jlong)(gc_stats->avg_promoted()->average())
-    );
-  }
-  inline void update_avg_promoted_dev(CMSGCStats* gc_stats) {
-    _promoted_avg_dev_counter->set_value(
-      (jlong)(gc_stats->avg_promoted()->deviation())
-    );
-  }
-  inline void update_avg_promoted_padded_avg(CMSGCStats* gc_stats) {
-    _promoted_padded_avg_counter->set_value(
-      (jlong)(gc_stats->avg_promoted()->padded_average())
-    );
-  }
-  inline void update_remark_pause_old_slope_counter() {
-    _remark_pause_old_slope_counter->set_value(
-      (jlong)(cms_size_policy()->remark_pause_old_slope() * 1000)
-    );
-  }
-  inline void update_initial_pause_old_slope_counter() {
-    _initial_pause_old_slope_counter->set_value(
-      (jlong)(cms_size_policy()->initial_pause_old_slope() * 1000)
-    );
-  }
-  inline void update_remark_pause_young_slope_counter() {
-    _remark_pause_young_slope_counter->set_value(
-      (jlong)(cms_size_policy()->remark_pause_young_slope() * 1000)
-    );
-  }
-  inline void update_initial_pause_young_slope_counter() {
-    _initial_pause_young_slope_counter->set_value(
-      (jlong)(cms_size_policy()->initial_pause_young_slope() * 1000)
-    );
-  }
-  inline void update_change_young_gen_for_maj_pauses() {
-    _change_young_gen_for_maj_pauses_counter->set_value(
-      cms_size_policy()->change_young_gen_for_maj_pauses());
-  }
-
- public:
-  CMSGCAdaptivePolicyCounters(const char* name, int collectors, int generations,
-                              AdaptiveSizePolicy* size_policy);
-
-  // update counters
-  void update_counters();
-  void update_counters(CMSGCStats* gc_stats);
-  void update_counters_from_policy();
-
-  inline void update_cms_capacity_counter(size_t size_in_bytes) {
-    _cms_capacity_counter->set_value(size_in_bytes);
-  }
-
-  virtual GCPolicyCounters::Name kind() const {
-    return GCPolicyCounters::CMSGCAdaptivePolicyCountersKind;
-  }
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
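The inline update_* methods above all perform the same two conversions before publishing a sampled value: pause and interval averages in seconds are scaled to whole milliseconds (via MILLIUNITS), and cost averages in the 0..1 range are scaled to percentages. A tiny stand-alone sketch of those conversions follows; the helper names are invented for illustration and this is not the PerfData counter API.

// Illustrative sketch only -- not part of this changeset.
#include <cstdint>
#include <iostream>

static const double kMillisPerSecond = 1000.0;  // stand-in for MILLIUNITS

// Publish a pause-time average (seconds) as whole milliseconds.
int64_t pause_counter_value(double avg_pause_sec) {
  return static_cast<int64_t>(avg_pause_sec * kMillisPerSecond);
}

// Publish a GC-cost average (fraction of elapsed time) as a percentage.
int64_t cost_counter_value(double avg_gc_cost) {
  return static_cast<int64_t>(avg_gc_cost * 100.0);
}

int main() {
  std::cout << pause_counter_value(0.0123) << " ms" << std::endl;  // prints 12 ms
  std::cout << cost_counter_value(0.07) << " %" << std::endl;      // prints 7 %
  return 0;
}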
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -70,7 +70,6 @@
 class CompactibleFreeListSpace: public CompactibleSpace {
   friend class VMStructs;
   friend class ConcurrentMarkSweepGeneration;
-  friend class ASConcurrentMarkSweepGeneration;
   friend class CMSCollector;
   // Local alloc buffer for promotion into this space.
   friend class CFLS_LAB;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -27,9 +27,8 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
+#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
@@ -319,26 +318,12 @@
   }
 }
 
-CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
+AdaptiveSizePolicy* CMSCollector::size_policy() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
     "Wrong type of heap");
-  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
-    gch->gen_policy()->size_policy();
-  assert(sp->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-  return sp;
-}
-
-CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
-  CMSGCAdaptivePolicyCounters* results =
-    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
-  assert(
-    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
-    "Wrong gc policy counter kind");
-  return results;
-}
-
+  return gch->gen_policy()->size_policy();
+}
 
 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 
@@ -1573,11 +1558,11 @@
   }
 
   if (MetaspaceGC::should_concurrent_collect()) {
-      if (Verbose && PrintGCDetails) {
+    if (Verbose && PrintGCDetails) {
       gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
-      }
-      return true;
-    }
+    }
+    return true;
+  }
 
   // CMSTriggerInterval starts a CMS cycle if enough time has passed.
   if (CMSTriggerInterval >= 0) {
@@ -2031,11 +2016,6 @@
       "collections passed to foreground collector", _full_gcs_since_conc_gc);
   }
 
-  // Sample collection interval time and reset for collection pause.
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->msc_collection_begin();
-  }
-
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
@@ -2111,11 +2091,6 @@
   _inter_sweep_timer.reset();
   _inter_sweep_timer.start();
 
-  // Sample collection pause time and reset for collection interval.
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->msc_collection_end(gch->gc_cause());
-  }
-
   gc_timer->register_gc_end();
 
   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
@@ -2373,26 +2348,14 @@
         }
         break;
       case Precleaning:
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_precleaning_begin();
-        }
         // marking from roots in markFromRoots has been completed
         preclean();
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_precleaning_end();
-        }
         assert(_collectorState == AbortablePreclean ||
                _collectorState == FinalMarking,
                "Collector state should have changed");
         break;
       case AbortablePreclean:
-        if (UseAdaptiveSizePolicy) {
-        size_policy()->concurrent_phases_resume();
-        }
         abortable_preclean();
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_precleaning_end();
-        }
         assert(_collectorState == FinalMarking, "Collector state should "
           "have changed");
         break;
@@ -2406,23 +2369,12 @@
         assert(_foregroundGCShouldWait, "block post-condition");
         break;
       case Sweeping:
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_sweeping_begin();
-        }
         // final marking in checkpointRootsFinal has been completed
         sweep(true);
         assert(_collectorState == Resizing, "Collector state change "
           "to Resizing must be done under the free_list_lock");
         _full_gcs_since_conc_gc = 0;
 
-        // Stop the timers for adaptive size policy for the concurrent phases
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_sweeping_end();
-          size_policy()->concurrent_phases_end(gch->gc_cause(),
-                                             gch->prev_gen(_cmsGen)->capacity(),
-                                             _cmsGen->free());
-        }
-
       case Resizing: {
         // Sweeping has been completed...
         // At this point the background collection has completed.
@@ -2539,9 +2491,6 @@
   const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
     true, NULL, gc_id);)
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->ms_collection_begin();
-  }
   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
 
   HandleMark hm;  // Discard invalid handles created during verification
@@ -2633,11 +2582,6 @@
     }
   }
 
-  if (UseAdaptiveSizePolicy) {
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    size_policy()->ms_collection_end(gch->gc_cause());
-  }
-
   if (VerifyAfterGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
     Universe::verify();
@@ -3053,20 +2997,21 @@
   HandleMark  hm;
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  // Get a clear set of claim bits for the strong roots processing to work with.
+  // Get a clear set of claim bits for the roots processing to work with.
   ClassLoaderDataGraph::clear_claimed_marks();
 
   // Mark from roots one level into CMS
   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
 
-  gch->gen_process_strong_roots(_cmsGen->level(),
-                                true,   // younger gens are roots
-                                true,   // activate StrongRootsScope
-                                SharedHeap::ScanningOption(roots_scanning_options()),
-                                &notOlder,
-                                NULL,
-                                NULL); // SSS: Provide correct closure
+  gch->gen_process_roots(_cmsGen->level(),
+                         true,   // younger gens are roots
+                         true,   // activate StrongRootsScope
+                         SharedHeap::ScanningOption(roots_scanning_options()),
+                         should_unload_classes(),
+                         &notOlder,
+                         NULL,
+                         NULL);  // SSS: Provide correct closure
 
   // Now mark from the roots
   MarkFromRootsClosure markFromRootsClosure(this, _span,
@@ -3117,22 +3062,24 @@
   HandleMark  hm;
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  // Get a clear set of claim bits for the strong roots processing to work with.
+  // Get a clear set of claim bits for the roots processing to work with.
   ClassLoaderDataGraph::clear_claimed_marks();
 
   // Mark from roots one level into CMS
   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
                                      markBitMap());
-  KlassToOopClosure klass_closure(&notOlder);
+  CLDToOopClosure cld_closure(&notOlder, true);
 
   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-  gch->gen_process_strong_roots(_cmsGen->level(),
-                                true,   // younger gens are roots
-                                true,   // activate StrongRootsScope
-                                SharedHeap::ScanningOption(roots_scanning_options()),
-                                &notOlder,
-                                NULL,
-                                &klass_closure);
+
+  gch->gen_process_roots(_cmsGen->level(),
+                         true,   // younger gens are roots
+                         true,   // activate StrongRootsScope
+                         SharedHeap::ScanningOption(roots_scanning_options()),
+                         should_unload_classes(),
+                         &notOlder,
+                         NULL,
+                         &cld_closure);
 
   // Now mark from the roots
   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
@@ -3319,12 +3266,10 @@
 void CMSCollector::setup_cms_unloading_and_verification_state() {
   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                              || VerifyBeforeExit;
-  const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
+  const  int  rso           =   SharedHeap::SO_AllCodeCache;
 
   // We set the proper root for this CMS cycle here.
   if (should_unload_classes()) {   // Should unload classes this cycle
-    remove_root_scanning_option(SharedHeap::SO_AllClasses);
-    add_root_scanning_option(SharedHeap::SO_SystemClasses);
     remove_root_scanning_option(rso);  // Shrink the root set appropriately
     set_verifying(should_verify);    // Set verification state for this cycle
     return;                            // Nothing else needs to be done at this time
@@ -3332,8 +3277,6 @@
 
   // Not unloading classes this cycle
   assert(!should_unload_classes(), "Inconsistency!");
-  remove_root_scanning_option(SharedHeap::SO_SystemClasses);
-  add_root_scanning_option(SharedHeap::SO_AllClasses);
 
   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
     // Include symbols, strings and code cache elements to prevent their resurrection.
@@ -3687,9 +3630,6 @@
 
   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
     PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_initial_begin();
-  }
 
   // Reset all the PLAB chunk arrays if necessary.
   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
@@ -3744,15 +3684,16 @@
       gch->set_par_threads(0);
     } else {
       // The serial version.
-      KlassToOopClosure klass_closure(&notOlder);
+      CLDToOopClosure cld_closure(&notOlder, true);
       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-      gch->gen_process_strong_roots(_cmsGen->level(),
-                                    true,   // younger gens are roots
-                                    true,   // activate StrongRootsScope
-                                    SharedHeap::ScanningOption(roots_scanning_options()),
-                                    &notOlder,
-                                    NULL,
-                                    &klass_closure);
+      gch->gen_process_roots(_cmsGen->level(),
+                             true,   // younger gens are roots
+                             true,   // activate StrongRootsScope
+                             SharedHeap::ScanningOption(roots_scanning_options()),
+                             should_unload_classes(),
+                             &notOlder,
+                             NULL,
+                             &cld_closure);
     }
   }
 
@@ -3769,9 +3710,6 @@
   // Save the end of the used_region of the constituent generations
   // to be used to limit the extent of sweep in each generation.
   save_sweep_limits();
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
-  }
   verify_overflow_empty();
 }
 
@@ -3788,15 +3726,6 @@
 
   bool res;
   if (asynch) {
-
-    // Start the timers for adaptive size policy for the concurrent phases
-    // Do it here so that the foreground MS can use the concurrent
-    // timer since a foreground MS might have the sweep done concurrently
-    // or STW.
-    if (UseAdaptiveSizePolicy) {
-      size_policy()->concurrent_marking_begin();
-    }
-
     // Weak ref discovery note: We may be discovering weak
     // refs in this generation concurrent (but interleaved) with
     // weak ref discovery by a younger generation collector.
@@ -3814,22 +3743,12 @@
         gclog_or_tty->print_cr("bailing out to foreground collection");
       }
     }
-    if (UseAdaptiveSizePolicy) {
-      size_policy()->concurrent_marking_end();
-    }
   } else {
     assert(SafepointSynchronize::is_at_safepoint(),
            "inconsistent with asynch == false");
-    if (UseAdaptiveSizePolicy) {
-      size_policy()->ms_collection_marking_begin();
-    }
     // already have locks
     res = markFromRootsWork(asynch);
     _collectorState = FinalMarking;
-    if (UseAdaptiveSizePolicy) {
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
-      size_policy()->ms_collection_marking_end(gch->gc_cause());
-    }
   }
   verify_overflow_empty();
   return res;
@@ -4705,8 +4624,7 @@
 
   if (clean_survivor) {  // preclean the active survivor space(s)
     assert(_young_gen->kind() == Generation::DefNew ||
-           _young_gen->kind() == Generation::ParNew ||
-           _young_gen->kind() == Generation::ASParNew,
+           _young_gen->kind() == Generation::ParNew,
          "incorrect type for cast");
     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
@@ -5077,10 +4995,6 @@
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
 
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_final_begin();
-  }
-
   ResourceMark rm;
   HandleMark   hm;
 
@@ -5214,9 +5128,6 @@
       "Should be clear by end of the final marking");
   assert(_ct->klass_rem_set()->mod_union_is_clear(),
       "Should be clear by end of the final marking");
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_final_end(gch->gc_cause());
-  }
 }
 
 void CMSParInitialMarkTask::work(uint worker_id) {
@@ -5228,7 +5139,6 @@
   _timer.start();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
-  KlassToOopClosure klass_closure(&par_mri_cl);
 
   // ---------- young gen roots --------------
   {
@@ -5244,13 +5154,17 @@
   // ---------- remaining roots --------------
   _timer.reset();
   _timer.start();
-  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
-                                false,     // yg was scanned above
-                                false,     // this is parallel code
-                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                                &par_mri_cl,
-                                NULL,
-                                &klass_closure);
+
+  CLDToOopClosure cld_closure(&par_mri_cl, true);
+
+  gch->gen_process_roots(_collector->_cmsGen->level(),
+                         false,     // yg was scanned above
+                         false,     // this is parallel code
+                         SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+                         _collector->should_unload_classes(),
+                         &par_mri_cl,
+                         NULL,
+                         &cld_closure);
   assert(_collector->should_unload_classes()
          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
@@ -5379,13 +5293,15 @@
   // ---------- remaining roots --------------
   _timer.reset();
   _timer.start();
-  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
-                                false,     // yg was scanned above
-                                false,     // this is parallel code
-                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                                &par_mrias_cl,
-                                NULL,
-                                NULL);     // The dirty klasses will be handled below
+  gch->gen_process_roots(_collector->_cmsGen->level(),
+                         false,     // yg was scanned above
+                         false,     // this is parallel code
+                         SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+                         _collector->should_unload_classes(),
+                         &par_mrias_cl,
+                         NULL,
+                         NULL);     // The dirty klasses will be handled below
+
   assert(_collector->should_unload_classes()
          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
@@ -5440,7 +5356,7 @@
   // We might have added oops to ClassLoaderData::_handles during the
   // concurrent marking phase. These oops point to newly allocated objects
   // that are guaranteed to be kept alive. Either by the direct allocation
-  // code, or when the young collector processes the strong roots. Hence,
+  // code, or when the young collector processes the roots. Hence,
   // we don't have to revisit the _handles block during the remark phase.
 
   // ---------- rescan dirty cards ------------
@@ -5862,7 +5778,7 @@
     cms_space,
     n_workers, workers, task_queues());
 
-  // Set up for parallel process_strong_roots work.
+  // Set up for parallel process_roots work.
   gch->set_par_threads(n_workers);
   // We won't be iterating over the cards in the card table updating
   // the younger_gen cards, so we shouldn't call the following else
@@ -5871,7 +5787,7 @@
   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
 
   // The young gen rescan work will not be done as part of
-  // process_strong_roots (which currently doesn't knw how to
+  // process_roots (which currently doesn't know how to
   // parallelize such a scan), but rather will be broken up into
   // a set of parallel tasks (via the sampling that the [abortable]
   // preclean phase did of EdenSpace, plus the [two] tasks of
@@ -5968,13 +5884,15 @@
 
     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
     GenCollectedHeap::StrongRootsScope srs(gch);
-    gch->gen_process_strong_roots(_cmsGen->level(),
-                                  true,  // younger gens as roots
-                                  false, // use the local StrongRootsScope
-                                  SharedHeap::ScanningOption(roots_scanning_options()),
-                                  &mrias_cl,
-                                  NULL,
-                                  NULL);  // The dirty klasses will be handled below
+
+    gch->gen_process_roots(_cmsGen->level(),
+                           true,  // younger gens as roots
+                           false, // use the local StrongRootsScope
+                           SharedHeap::ScanningOption(roots_scanning_options()),
+                           should_unload_classes(),
+                           &mrias_cl,
+                           NULL,
+                           NULL); // The dirty klasses will be handled below
 
     assert(should_unload_classes()
            || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
@@ -6014,7 +5932,7 @@
   // We might have added oops to ClassLoaderData::_handles during the
   // concurrent marking phase. These oops point to newly allocated objects
   // that are guaranteed to be kept alive. Either by the direct allocation
-  // code, or when the young collector processes the strong roots. Hence,
+  // code, or when the young collector processes the roots. Hence,
   // we don't have to revisit the _handles block during the remark phase.
 
   verify_work_stacks_empty();
@@ -6264,15 +6182,14 @@
       // Clean up unreferenced symbols in symbol table.
       SymbolTable::unlink();
     }
-  }
-
-  // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
-  // Need to check if we really scanned the StringTable.
-  if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
-    GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
-    // Delete entries for dead interned strings.
-    StringTable::unlink(&_is_alive_closure);
-  }
+
+    {
+      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      // Delete entries for dead interned strings.
+      StringTable::unlink(&_is_alive_closure);
+    }
+  }
+
 
   // Restore any preserved marks as a result of mark stack or
   // work queue overflow
@@ -6329,7 +6246,6 @@
 
   _inter_sweep_timer.stop();
   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
-  size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
 
   assert(!_intra_sweep_timer.is_active(), "Should not be active");
   _intra_sweep_timer.reset();
@@ -6454,17 +6370,6 @@
   }
 }
 
-CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "Wrong type of heap");
-  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
-    gch->gen_policy()->size_policy();
-  assert(sp->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-  return sp;
-}
-
 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
@@ -6540,9 +6445,6 @@
 // Reset CMS data structures (for now just the marking bit map)
 // preparatory for the next cycle.
 void CMSCollector::reset(bool asynch) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  CMSAdaptiveSizePolicy* sp = size_policy();
-  AdaptiveSizePolicyOutput(sp, gch->total_collections());
   if (asynch) {
     CMSTokenSyncWithLocks ts(true, bitMapLock());
 
@@ -6597,7 +6499,7 @@
     // Because only the full (i.e., concurrent mode failure) collections
     // are being measured for gc overhead limits, clean the "near" flag
     // and count.
-    sp->reset_gc_overhead_limit_count();
+    size_policy()->reset_gc_overhead_limit_count();
     _collectorState = Idling;
   } else {
     // already have the lock
@@ -7064,7 +6966,6 @@
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -7225,7 +7126,6 @@
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -7298,7 +7198,6 @@
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -7457,7 +7356,6 @@
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -8099,7 +7997,6 @@
   ConcurrentMarkSweepThread::acknowledge_yield_request();
 
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -8780,7 +8677,6 @@
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -9327,172 +9223,6 @@
 }
 #endif
 
-CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
-{
-  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
-  CMSAdaptiveSizePolicy* size_policy =
-    (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
-  assert(size_policy->is_gc_cms_adaptive_size_policy(),
-    "Wrong type for size policy");
-  return size_policy;
-}
-
-void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
-                                           size_t desired_promo_size) {
-  if (cur_promo_size < desired_promo_size) {
-    size_t expand_bytes = desired_promo_size - cur_promo_size;
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
-        "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
-        expand_bytes);
-    }
-    expand(expand_bytes,
-           MinHeapDeltaBytes,
-           CMSExpansionCause::_adaptive_size_policy);
-  } else if (desired_promo_size < cur_promo_size) {
-    size_t shrink_bytes = cur_promo_size - desired_promo_size;
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
-        "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
-        shrink_bytes);
-    }
-    shrink(shrink_bytes);
-  }
-}
-
-CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  CMSGCAdaptivePolicyCounters* counters =
-    (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
-  assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
-    "Wrong kind of counters");
-  return counters;
-}
-
-
-void ASConcurrentMarkSweepGeneration::update_counters() {
-  if (UsePerfData) {
-    _space_counters->update_all();
-    _gen_counters->update_all();
-    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
-    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
-      "Wrong gc statistics type");
-    counters->update_counters(gc_stats_l);
-  }
-}
-
-void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
-  if (UsePerfData) {
-    _space_counters->update_used(used);
-    _space_counters->update_capacity();
-    _gen_counters->update_all();
-
-    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
-    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
-      "Wrong gc statistics type");
-    counters->update_counters(gc_stats_l);
-  }
-}
-
-void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
-  assert_locked_or_safepoint(Heap_lock);
-  assert_lock_strong(freelistLock());
-  HeapWord* old_end = _cmsSpace->end();
-  HeapWord* unallocated_start = _cmsSpace->unallocated_block();
-  assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
-  FreeChunk* chunk_at_end = find_chunk_at_end();
-  if (chunk_at_end == NULL) {
-    // No room to shrink
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("No room to shrink: old_end  "
-        PTR_FORMAT "  unallocated_start  " PTR_FORMAT
-        " chunk_at_end  " PTR_FORMAT,
-        old_end, unallocated_start, chunk_at_end);
-    }
-    return;
-  } else {
-
-    // Find the chunk at the end of the space and determine
-    // how much it can be shrunk.
-    size_t shrinkable_size_in_bytes = chunk_at_end->size();
-    size_t aligned_shrinkable_size_in_bytes =
-      align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
-    assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
-      "Inconsistent chunk at end of space");
-    size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
-    size_t word_size_before = heap_word_size(_virtual_space.committed_size());
-
-    // Shrink the underlying space
-    _virtual_space.shrink_by(bytes);
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
-        " desired_bytes " SIZE_FORMAT
-        " shrinkable_size_in_bytes " SIZE_FORMAT
-        " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
-        "  bytes  " SIZE_FORMAT,
-        desired_bytes, shrinkable_size_in_bytes,
-        aligned_shrinkable_size_in_bytes, bytes);
-      gclog_or_tty->print_cr("          old_end  " SIZE_FORMAT
-        "  unallocated_start  " SIZE_FORMAT,
-        old_end, unallocated_start);
-    }
-
-    // If the space did shrink (shrinking is not guaranteed),
-    // shrink the chunk at the end by the appropriate amount.
-    if (((HeapWord*)_virtual_space.high()) < old_end) {
-      size_t new_word_size =
-        heap_word_size(_virtual_space.committed_size());
-
-      // Have to remove the chunk from the dictionary because it is changing
-      // size and might be someplace elsewhere in the dictionary.
-
-      // Get the chunk at end, shrink it, and put it
-      // back.
-      _cmsSpace->removeChunkFromDictionary(chunk_at_end);
-      size_t word_size_change = word_size_before - new_word_size;
-      size_t chunk_at_end_old_size = chunk_at_end->size();
-      assert(chunk_at_end_old_size >= word_size_change,
-        "Shrink is too large");
-      chunk_at_end->set_size(chunk_at_end_old_size -
-                          word_size_change);
-      _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
-        word_size_change);
-
-      _cmsSpace->returnChunkToDictionary(chunk_at_end);
-
-      MemRegion mr(_cmsSpace->bottom(), new_word_size);
-      _bts->resize(new_word_size);  // resize the block offset shared array
-      Universe::heap()->barrier_set()->resize_covered_region(mr);
-      _cmsSpace->assert_locked();
-      _cmsSpace->set_end((HeapWord*)_virtual_space.high());
-
-      NOT_PRODUCT(_cmsSpace->dictionary()->verify());
-
-      // update the space and generation capacity counters
-      if (UsePerfData) {
-        _space_counters->update_capacity();
-        _gen_counters->update_all();
-      }
-
-      if (Verbose && PrintGCDetails) {
-        size_t new_mem_size = _virtual_space.committed_size();
-        size_t old_mem_size = new_mem_size + bytes;
-        gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
-      }
-    }
-
-    assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
-      "Inconsistency at end of space");
-    assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
-      "Shrinking is inconsistent");
-    return;
-  }
-}
 // Transfer some number of overflown objects to usual marking
 // stack. Return true if some objects were transferred.
 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -32,6 +32,7 @@
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "memory/freeBlockDictionary.hpp"
 #include "memory/generation.hpp"
+#include "memory/iterator.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/virtualspace.hpp"
 #include "services/memoryService.hpp"
@@ -52,7 +53,7 @@
 // Concurrent mode failures are currently handled by
 // means of a sliding mark-compact.
 
-class CMSAdaptiveSizePolicy;
+class AdaptiveSizePolicy;
 class CMSConcMarkingTask;
 class CMSGCAdaptivePolicyCounters;
 class CMSTracer;
@@ -1009,8 +1010,7 @@
   void icms_wait();          // Called at yield points.
 
   // Adaptive size policy
-  CMSAdaptiveSizePolicy* size_policy();
-  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
+  AdaptiveSizePolicy* size_policy();
 
   static void print_on_error(outputStream* st);
 
@@ -1150,9 +1150,6 @@
 
   virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
 
-  // Adaptive size policy
-  CMSAdaptiveSizePolicy* size_policy();
-
   void set_did_compact(bool v) { _did_compact = v; }
 
   bool refs_discovery_is_atomic() const { return false; }
@@ -1346,37 +1343,6 @@
   void rotate_debug_collection_type();
 };
 
-class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
-
-  // Return the size policy from the heap's collector
-  // policy casted to CMSAdaptiveSizePolicy*.
-  CMSAdaptiveSizePolicy* cms_size_policy() const;
-
-  // Resize the generation based on the adaptive size
-  // policy.
-  void resize(size_t cur_promo, size_t desired_promo);
-
-  // Return the GC counters from the collector policy
-  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
-
-  virtual void shrink_by(size_t bytes);
-
- public:
-  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
-                                  int level, CardTableRS* ct,
-                                  bool use_adaptive_freelists,
-                                  FreeBlockDictionary<FreeChunk>::DictionaryChoice
-                                    dictionaryChoice) :
-    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
-      use_adaptive_freelists, dictionaryChoice) {}
-
-  virtual const char* short_name() const { return "ASCMS"; }
-  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
-
-  virtual void update_counters();
-  virtual void update_counters(size_t used);
-};
-
 //
 // Closures of various sorts used by CMS to accomplish its work
 //
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "memory/memRegion.hpp"
 #include "oops/markOop.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/ostream.hpp"
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/symbolTable.hpp"
+#include "code/codeCache.hpp"
 #include "gc_implementation/g1/concurrentMark.inline.hpp"
 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
@@ -39,6 +40,7 @@
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcTraceTime.hpp"
+#include "memory/allocation.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
@@ -58,8 +60,8 @@
   _bmWordSize = 0;
 }
 
-HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
-                                               HeapWord* limit) const {
+HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
+                                               const HeapWord* limit) const {
   // First we must round addr *up* to a possible object boundary.
   addr = (HeapWord*)align_size_up((intptr_t)addr,
                                   HeapWordSize << _shifter);
@@ -76,8 +78,8 @@
   return nextAddr;
 }
 
-HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
-                                                 HeapWord* limit) const {
+HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
+                                                 const HeapWord* limit) const {
   size_t addrOffset = heapWordToOffset(addr);
   if (limit == NULL) {
     limit = _bmStartWord + _bmWordSize;
@@ -1223,6 +1225,9 @@
 };
 
 void ConcurrentMark::scanRootRegions() {
+  // Start of concurrent marking.
+  ClassLoaderDataGraph::clear_claimed_marks();
+
   // scan_in_progress() will have been set to true only if there was
   // at least one root region to scan. So, if it's false, we
   // should not attempt to do any further work.
@@ -1271,7 +1276,7 @@
   CMConcurrentMarkingTask markingTask(this, cmThread());
   if (use_parallel_marking_threads()) {
     _parallel_workers->set_active_workers((int)active_workers);
-    // Don't set _n_par_threads because it affects MT in process_strong_roots()
+    // Don't set _n_par_threads because it affects MT in process_roots()
     // and the decisions on that MT processing is made elsewhere.
     assert(_parallel_workers->active_workers() > 0, "Should have been set");
     _parallel_workers->run_task(&markingTask);
@@ -2142,23 +2147,29 @@
   // Update the soft reference policy with the new heap occupancy.
   Universe::update_heap_info_at_gc();
 
-  // We need to make this be a "collection" so any collection pause that
-  // races with it goes around and waits for completeCleanup to finish.
-  g1h->increment_total_collections();
-
-  // We reclaimed old regions so we should calculate the sizes to make
-  // sure we update the old gen/space data.
-  g1h->g1mm()->update_sizes();
-
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     Universe::heap()->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(after)");
   }
+
   g1h->check_bitmaps("Cleanup End");
 
   g1h->verify_region_sets_optional();
+
+  // We need to make this be a "collection" so any collection pause that
+  // races with it goes around and waits for completeCleanup to finish.
+  g1h->increment_total_collections();
+
+  // Clean out dead classes and update Metaspace sizes.
+  ClassLoaderDataGraph::purge();
+  MetaspaceGC::compute_new_size();
+
+  // We reclaimed old regions so we should calculate the sizes to make
+  // sure we update the old gen/space data.
+  g1h->g1mm()->update_sizes();
+
   g1h->trace_heap_after_concurrent_cycle();
 }
 
@@ -2445,6 +2456,26 @@
   _g1h->set_par_threads(0);
 }
 
+void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
+  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
+}
+
+// Helper class to get rid of some boilerplate code.
+class G1RemarkGCTraceTime : public GCTraceTime {
+  static bool doit_and_prepend(bool doit) {
+    if (doit) {
+      gclog_or_tty->put(' ');
+    }
+    return doit;
+  }
+
+ public:
+  G1RemarkGCTraceTime(const char* title, bool doit)
+    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
+        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
+  }
+};
+
 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   if (has_overflown()) {
     // Skip processing the discovered references if we have
@@ -2557,9 +2588,28 @@
     return;
   }
 
-  g1h->unlink_string_and_symbol_table(&g1_is_alive,
-                                      /* process_strings */ false, // currently strings are always roots
-                                      /* process_symbols */ true);
+  assert(_markStack.isEmpty(), "Marking should have completed");
+
+  // Unload Klasses, Strings, Symbols, Code Cache, etc.
+
+  G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
+
+  bool purged_classes;
+
+  {
+    G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
+    purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
+  }
+
+  {
+    G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
+    weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
+  }
+
+  if (G1StringDedup::is_enabled()) {
+    G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
+    G1StringDedup::unlink(&g1_is_alive);
+  }
 }
 
 void ConcurrentMark::swapMarkBitMaps() {
@@ -2568,6 +2618,57 @@
   _nextMarkBitMap  = (CMBitMap*)  temp;
 }
 
+class CMObjectClosure;
+
+// Closure for iterating over objects, currently only used for
+// processing SATB buffers.
+class CMObjectClosure : public ObjectClosure {
+private:
+  CMTask* _task;
+
+public:
+  void do_object(oop obj) {
+    _task->deal_with_reference(obj);
+  }
+
+  CMObjectClosure(CMTask* task) : _task(task) { }
+};
+
+class G1RemarkThreadsClosure : public ThreadClosure {
+  CMObjectClosure _cm_obj;
+  G1CMOopClosure _cm_cl;
+  MarkingCodeBlobClosure _code_cl;
+  int _thread_parity;
+  bool _is_par;
+
+ public:
+  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
+    _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
+    _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
+
+  void do_thread(Thread* thread) {
+    if (thread->is_Java_thread()) {
+      if (thread->claim_oops_do(_is_par, _thread_parity)) {
+        JavaThread* jt = (JavaThread*)thread;
+
+        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
+        // however, oops reachable from nmethods have very complex lifecycles:
+        // * Alive if on the stack of an executing method
+        // * Weakly reachable otherwise
+        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
+        // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
+        jt->nmethods_do(&_code_cl);
+
+        jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
+      }
+    } else if (thread->is_VM_thread()) {
+      if (thread->claim_oops_do(_is_par, _thread_parity)) {
+        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
+      }
+    }
+  }
+};
+
 class CMRemarkTask: public AbstractGangTask {
 private:
   ConcurrentMark* _cm;
@@ -2579,6 +2680,14 @@
     if (worker_id < _cm->active_tasks()) {
       CMTask* task = _cm->task(worker_id);
       task->record_start_time();
+      {
+        ResourceMark rm;
+        HandleMark hm;
+
+        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
+        Threads::threads_do(&threads_f);
+      }
+
       do {
         task->do_marking_step(1000000000.0 /* something very large */,
                               true         /* do_termination       */,
@@ -2601,6 +2710,8 @@
   HandleMark   hm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
+  G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
+
   g1h->ensure_parsability(false);
 
   if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -3430,20 +3541,6 @@
   }
 };
 
-// Closure for iterating over objects, currently only used for
-// processing SATB buffers.
-class CMObjectClosure : public ObjectClosure {
-private:
-  CMTask* _task;
-
-public:
-  void do_object(oop obj) {
-    _task->deal_with_reference(obj);
-  }
-
-  CMObjectClosure(CMTask* task) : _task(task) { }
-};
-
 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                                ConcurrentMark* cm,
                                CMTask* task)
@@ -3908,15 +4005,6 @@
     }
   }
 
-  if (!concurrent() && !has_aborted()) {
-    // We should only do this during remark.
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
-      satb_mq_set.par_iterate_closure_all_threads(_worker_id);
-    } else {
-      satb_mq_set.iterate_closure_all_threads();
-    }
-  }
-
   _draining_satb_buffers = false;
 
   assert(has_aborted() ||
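
The new G1RemarkThreadsClosure above hinges on Thread::claim_oops_do(): a per-round parity lets several remark workers race over the thread list while each thread's nmethods and SATB buffer are still visited by exactly one worker. Below is a small stand-alone model of that claim-by-parity idiom, not HotSpot code: it uses std::atomic instead of HotSpot's internals, the names ModelThread, g_current_parity and start_new_round are invented, and it assumes the parity is advanced once per strong-roots scope.

#include <atomic>

// Global parity, advanced once per remark round (a stand-in for
// SharedHeap::strong_roots_parity()).
static std::atomic<int> g_current_parity(1);

struct ModelThread {
  std::atomic<int> _claimed_parity;   // parity of the round that last claimed this thread

  ModelThread() : _claimed_parity(0) {}

  // Returns true for exactly one caller per thread per round in the parallel case.
  bool claim_oops_do(bool is_par, int parity) {
    if (!is_par) {
      // Serial remark: no race, simply record the parity.
      _claimed_parity.store(parity);
      return true;
    }
    int expected = _claimed_parity.load();
    if (expected == parity) {
      return false;                   // some other worker already claimed this thread
    }
    return _claimed_parity.compare_exchange_strong(expected, parity);
  }
};

// Advance the parity at the start of a round; every worker then passes the
// returned value to claim_oops_do() before touching a thread's SATB queue.
static int start_new_round() {
  return g_current_parity.fetch_add(1) + 1;
}
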
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
 
+#include "classfile/javaClasses.hpp"
 #include "gc_implementation/g1/heapRegionSet.hpp"
 #include "gc_implementation/shared/gcId.hpp"
 #include "utilities/taskqueue.hpp"
@@ -86,19 +87,19 @@
   // Return the address corresponding to the next marked bit at or after
   // "addr", and before "limit", if "limit" is non-NULL.  If there is no
   // such bit, returns "limit" if that is non-NULL, or else "endWord()".
-  HeapWord* getNextMarkedWordAddress(HeapWord* addr,
-                                     HeapWord* limit = NULL) const;
+  HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
+                                     const HeapWord* limit = NULL) const;
   // Return the address corresponding to the next unmarked bit at or after
   // "addr", and before "limit", if "limit" is non-NULL.  If there is no
   // such bit, returns "limit" if that is non-NULL, or else "endWord()".
-  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
-                                       HeapWord* limit = NULL) const;
+  HeapWord* getNextUnmarkedWordAddress(const HeapWord* addr,
+                                       const HeapWord* limit = NULL) const;
 
   // conversion utilities
   HeapWord* offsetToHeapWord(size_t offset) const {
     return _bmStartWord + (offset << _shifter);
   }
-  size_t heapWordToOffset(HeapWord* addr) const {
+  size_t heapWordToOffset(const HeapWord* addr) const {
     return pointer_delta(addr, _bmStartWord) >> _shifter;
   }
   int heapWordDiffToOffsetDiff(size_t diff) const;
@@ -476,6 +477,7 @@
   ForceOverflowSettings _force_overflow_conc;
   ForceOverflowSettings _force_overflow_stw;
 
+  void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
   void weakRefsWork(bool clear_all_soft_refs);
 
   void swapMarkBitMaps();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
 
 #include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
 
 inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region,
                                          size_t word_size,
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -426,7 +426,7 @@
       q = n;
       oop obj = oop(q);
       if (obj->klass_or_null() == NULL) return q;
-      n += obj->size();
+      n += block_size(q);
     }
     assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
     // [q, n) is the block that crosses the boundary.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -26,7 +26,8 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
 
 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "memory/space.hpp"
 
 inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
@@ -112,7 +113,7 @@
     q = n;
     oop obj = oop(q);
     if (obj->klass_or_null() == NULL) return q;
-    n += obj->size();
+    n += block_size(q);
   }
   assert(q <= n, "wrong order for q and addr");
   assert(addr < n, "wrong order for addr and n");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -30,23 +30,52 @@
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
-G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL) {
+G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL), _free(NULL) {
   _top = bottom();
 }
 
 void G1CodeRootChunk::reset() {
   _next = _prev = NULL;
+  _free = NULL;
   _top = bottom();
 }
 
 void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
-  nmethod** cur = bottom();
+  NmethodOrLink* cur = bottom();
   while (cur != _top) {
-    cl->do_code_blob(*cur);
+    if (is_nmethod(cur)) {
+      cl->do_code_blob(cur->_nmethod);
+    }
     cur++;
   }
 }
 
+bool G1CodeRootChunk::remove_lock_free(nmethod* method) {
+  for (NmethodOrLink* cur = bottom(); cur != _top; cur++) {
+    if (cur->_nmethod == method) {
+      bool result = Atomic::cmpxchg_ptr(NULL, &cur->_nmethod, method) == method;
+
+      if (!result) {
+        // Someone else cleared out this entry.
+        return false;
+      }
+
+      // The method was cleared. Time to link it into the free list.
+      NmethodOrLink* prev_free;
+      do {
+        prev_free = (NmethodOrLink*)_free;
+        cur->_link = prev_free;
+      } while (Atomic::cmpxchg_ptr(cur, &_free, prev_free) != prev_free);
+
+      return true;
+    }
+  }
+
+  return false;
+}
+
 G1CodeRootChunkManager::G1CodeRootChunkManager() : _free_list(), _num_chunks_handed_out(0) {
   _free_list.initialize();
   _free_list.set_size(G1CodeRootChunk::word_size());
@@ -140,34 +169,43 @@
 
 void G1CodeRootSet::add(nmethod* method) {
   if (!contains(method)) {
-    // Try to add the nmethod. If there is not enough space, get a new chunk.
-    if (_list.head() == NULL || _list.head()->is_full()) {
-      G1CodeRootChunk* cur = new_chunk();
+    // Find the first chunk that isn't full.
+    G1CodeRootChunk* cur = _list.head();
+    while (cur != NULL) {
+      if (!cur->is_full()) {
+        break;
+      }
+      cur = cur->next();
+    }
+
+    // All chunks are full, get a new chunk.
+    if (cur == NULL) {
+      cur = new_chunk();
       _list.return_chunk_at_head(cur);
     }
-    bool result = _list.head()->add(method);
+
+    // Add the nmethod.
+    bool result = cur->add(method);
+
     guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method));
+
     _length++;
   }
 }
 
-void G1CodeRootSet::remove(nmethod* method) {
+void G1CodeRootSet::remove_lock_free(nmethod* method) {
   G1CodeRootChunk* found = find(method);
   if (found != NULL) {
-    bool result = found->remove(method);
-    guarantee(result, err_msg("could not find nmethod "PTR_FORMAT" during removal although we previously found it", method));
-    // eventually free completely emptied chunk
-    if (found->is_empty()) {
-      _list.remove_chunk(found);
-      free(found);
+    bool result = found->remove_lock_free(method);
+    if (result) {
+      Atomic::dec_ptr((volatile intptr_t*)&_length);
     }
-    _length--;
   }
   assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method));
 }
 
 nmethod* G1CodeRootSet::pop() {
-  do {
+  while (true) {
     G1CodeRootChunk* cur = _list.head();
     if (cur == NULL) {
       assert(_length == 0, "when there are no chunks, there should be no elements");
@@ -180,7 +218,7 @@
     } else {
       free(_list.get_chunk_at_head());
     }
-  } while (true);
+  }
 }
 
 G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -31,6 +31,14 @@
 
 class CodeBlobClosure;
 
+// The elements of a G1CodeRootChunk are either:
+//  1) nmethod pointers
+//  2) nodes in an internally chained free list
+typedef union {
+  nmethod* _nmethod;
+  void*    _link;
+} NmethodOrLink;
+
 class G1CodeRootChunk : public CHeapObj<mtGC> {
  private:
   static const int NUM_ENTRIES = 32;
@@ -38,16 +46,28 @@
   G1CodeRootChunk*     _next;
   G1CodeRootChunk*     _prev;
 
-  nmethod** _top;
+  NmethodOrLink*          _top;
+  // First free position within the chunk.
+  volatile NmethodOrLink* _free;
 
-  nmethod* _data[NUM_ENTRIES];
+  NmethodOrLink _data[NUM_ENTRIES];
 
-  nmethod** bottom() const {
-    return (nmethod**) &(_data[0]);
+  NmethodOrLink* bottom() const {
+    return (NmethodOrLink*) &(_data[0]);
   }
 
-  nmethod** end() const {
-    return (nmethod**) &(_data[NUM_ENTRIES]);
+  NmethodOrLink* end() const {
+    return (NmethodOrLink*) &(_data[NUM_ENTRIES]);
+  }
+
+  bool is_link(NmethodOrLink* nmethod_or_link) {
+    return nmethod_or_link->_link == NULL ||
+        (bottom() <= nmethod_or_link->_link
+        && nmethod_or_link->_link < end());
+  }
+
+  bool is_nmethod(NmethodOrLink* nmethod_or_link) {
+    return !is_link(nmethod_or_link);
   }
 
  public:
@@ -85,46 +105,55 @@
   }
 
   bool is_full() const {
-    return _top == (nmethod**)end();
+    return _top == end() && _free == NULL;
   }
 
   bool contains(nmethod* method) {
-    nmethod** cur = bottom();
+    NmethodOrLink* cur = bottom();
     while (cur != _top) {
-      if (*cur == method) return true;
+      if (cur->_nmethod == method) return true;
       cur++;
     }
     return false;
   }
 
   bool add(nmethod* method) {
-    if (is_full()) return false;
-    *_top = method;
-    _top++;
+    if (is_full()) {
+      return false;
+    }
+
+    if (_free != NULL) {
+      // Take from internally chained free list
+      NmethodOrLink* first_free = (NmethodOrLink*)_free;
+      _free = (NmethodOrLink*)_free->_link;
+      first_free->_nmethod = method;
+    } else {
+      // Take from top.
+      _top->_nmethod = method;
+      _top++;
+    }
+
     return true;
   }
 
-  bool remove(nmethod* method) {
-    nmethod** cur = bottom();
-    while (cur != _top) {
-      if (*cur == method) {
-        memmove(cur, cur + 1, (_top - (cur + 1)) * sizeof(nmethod**));
-        _top--;
-        return true;
-      }
-      cur++;
-    }
-    return false;
-  }
+  bool remove_lock_free(nmethod* method);
 
   void nmethods_do(CodeBlobClosure* blk);
 
   nmethod* pop() {
-    if (is_empty()) {
-      return NULL;
+    if (_free != NULL) {
+      // Kill the free list.
+      _free = NULL;
     }
-    _top--;
-    return *_top;
+
+    while (!is_empty()) {
+      _top--;
+      if (is_nmethod(_top)) {
+        return _top->_nmethod;
+      }
+    }
+
+    return NULL;
   }
 };
 
@@ -193,7 +222,7 @@
   // method is likely to be repeatedly called with the same nmethod.
   void add(nmethod* method);
 
-  void remove(nmethod* method);
+  void remove_lock_free(nmethod* method);
   nmethod* pop();
 
   bool contains(nmethod* method);
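
The reworked G1CodeRootSet above makes removal lock-free: a slot in a G1CodeRootChunk is cleared with a compare-and-swap and then pushed onto a chunk-internal free list that add() can later recycle. The following is a self-contained sketch of that pattern only, not the patch's code: it uses std::atomic in place of HotSpot's Atomic::cmpxchg_ptr, the names Slot and Chunk are invented, and it assumes, as the patch does, that add() never runs concurrently with remove_lock_free().

#include <atomic>
#include <cstddef>

struct Slot {
  // Holds either a payload pointer or, once freed, a link to the next free slot.
  std::atomic<void*> value;
};

class Chunk {
  static const int NUM_ENTRIES = 32;
  Slot _data[NUM_ENTRIES];
  Slot* _top;                  // first never-used slot
  std::atomic<Slot*> _free;    // head of the internally chained free list

 public:
  Chunk() : _top(_data), _free(NULL) {}

  // Not lock-free: the caller is assumed to serialize add() calls, mirroring the patch.
  bool add(void* v) {
    Slot* s = _free.load();
    if (s != NULL) {
      // Recycle a slot that remove_lock_free() handed back.
      _free.store(static_cast<Slot*>(s->value.load()));
      s->value.store(v);
      return true;
    }
    if (_top == _data + NUM_ENTRIES) {
      return false;            // chunk is full
    }
    _top->value.store(v);
    _top++;
    return true;
  }

  // Lock-free removal: CAS the slot to NULL, then CAS-push it onto the free list.
  bool remove_lock_free(void* v) {
    for (Slot* cur = _data; cur != _top; cur++) {
      void* expected = v;
      if (cur->value.load() == v &&
          cur->value.compare_exchange_strong(expected, NULL)) {
        // Exactly one racing remover reaches this point for a given slot.
        Slot* head = _free.load();
        do {
          cur->value.store(head);
        } while (!_free.compare_exchange_weak(head, cur));
        return true;
      }
    }
    return false;
  }
};
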
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -44,6 +44,7 @@
 #include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/g1MarkSweep.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
+#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
@@ -56,6 +57,7 @@
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
+#include "memory/allocation.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/generationSpec.hpp"
 #include "memory/iterator.hpp"
@@ -63,11 +65,9 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
 #include "runtime/atomic.inline.hpp"
-#include "runtime/prefetch.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
-#include "utilities/ticks.hpp"
 
 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
 
@@ -92,10 +92,10 @@
 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
 // The number of GC workers is passed to heap_region_par_iterate_chunked().
 // It does use run_task() which sets _n_workers in the task.
-// G1ParTask executes g1_process_strong_roots() ->
-// SharedHeap::process_strong_roots() which calls eventually to
+// G1ParTask executes g1_process_roots() ->
+// SharedHeap::process_roots() which calls eventually to
 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
-// SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
+// SequentialSubTasksDone.  SharedHeap::process_roots() also
 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
 //
 
@@ -3380,25 +3380,19 @@
     if (!silent) { gclog_or_tty->print("Roots "); }
     VerifyRootsClosure rootsCl(vo);
     VerifyKlassClosure klassCl(this, &rootsCl);
+    CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
 
     // We apply the relevant closures to all the oops in the
-    // system dictionary, class loader data graph and the string table.
-    // Don't verify the code cache here, since it's verified below.
-    const int so = SO_AllClasses | SO_Strings;
-
-    // Need cleared claim bits for the strong roots processing
-    ClassLoaderDataGraph::clear_claimed_marks();
-
-    process_strong_roots(true,      // activate StrongRootsScope
-                         ScanningOption(so),  // roots scanning options
-                         &rootsCl,
-                         &klassCl
-                         );
-
-    // Verify the nmethods in the code cache.
+    // system dictionary, class loader data graph, the string table
+    // and the nmethods in the code cache.
     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
-    CodeCache::blobs_do(&blobsCl);
+
+    process_all_roots(true,            // activate StrongRootsScope
+                      SO_AllCodeCache, // roots scanning options
+                      &rootsCl,
+                      &cldCl,
+                      &blobsCl);
 
     bool failures = rootsCl.failures() || codeRootsCl.failures();
 
@@ -3980,6 +3974,7 @@
       increment_gc_time_stamp();
 
       verify_before_gc();
+
       check_bitmaps("GC Start");
 
       COMPILER2_PRESENT(DerivedPointerTable::clear());
@@ -4330,11 +4325,7 @@
   assert(_mutator_alloc_region.get() == NULL, "post-condition");
 }
 
-void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
-
-  _survivor_gc_alloc_region.init();
-  _old_gc_alloc_region.init();
+void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
   HeapRegion* retained_region = _retained_old_gc_alloc_region;
   _retained_old_gc_alloc_region = NULL;
 
@@ -4352,7 +4343,7 @@
       !(retained_region->top() == retained_region->end()) &&
       !retained_region->is_empty() &&
       !retained_region->isHumongous()) {
-    retained_region->set_saved_mark();
+    retained_region->record_top_and_timestamp();
     // The retained region was added to the old region set when it was
     // retired. We have to remove it now, since we don't allow regions
     // we allocate to in the region sets. We'll re-add it later, when
@@ -4366,6 +4357,15 @@
   }
 }
 
+void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  _survivor_gc_alloc_region.init();
+  _old_gc_alloc_region.init();
+
+  use_retained_old_gc_alloc_region(evacuation_info);
+}
+
 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
   evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
                                          _old_gc_alloc_region.count());
@@ -4559,126 +4559,6 @@
 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
   ParGCAllocBuffer(gclab_word_size), _retired(true) { }
 
-G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
-  : _g1h(g1h),
-    _refs(g1h->task_queue(queue_num)),
-    _dcq(&g1h->dirty_card_queue_set()),
-    _ct_bs(g1h->g1_barrier_set()),
-    _g1_rem(g1h->g1_rem_set()),
-    _hash_seed(17), _queue_num(queue_num),
-    _term_attempts(0),
-    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
-    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
-    _age_table(false), _scanner(g1h, this, rp),
-    _strong_roots_time(0), _term_time(0),
-    _alloc_buffer_waste(0), _undo_waste(0) {
-  // we allocate G1YoungSurvRateNumRegions plus one entries, since
-  // we "sacrifice" entry 0 to keep track of surviving bytes for
-  // non-young regions (where the age is -1)
-  // We also add a few elements at the beginning and at the end in
-  // an attempt to eliminate cache contention
-  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
-  uint array_length = PADDING_ELEM_NUM +
-                      real_length +
-                      PADDING_ELEM_NUM;
-  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
-  if (_surviving_young_words_base == NULL)
-    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
-                          "Not enough space for young surv histo.");
-  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
-  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
-
-  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
-  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
-
-  _start = os::elapsedTime();
-}
-
-void
-G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
-{
-  st->print_raw_cr("GC Termination Stats");
-  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
-                   " ------waste (KiB)------");
-  st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
-                   "  total   alloc    undo");
-  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
-                   " ------- ------- -------");
-}
-
-void
-G1ParScanThreadState::print_termination_stats(int i,
-                                              outputStream* const st) const
-{
-  const double elapsed_ms = elapsed_time() * 1000.0;
-  const double s_roots_ms = strong_roots_time() * 1000.0;
-  const double term_ms    = term_time() * 1000.0;
-  st->print_cr("%3d %9.2f %9.2f %6.2f "
-               "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
-               SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
-               i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
-               term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
-               (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
-               alloc_buffer_waste() * HeapWordSize / K,
-               undo_waste() * HeapWordSize / K);
-}
-
-#ifdef ASSERT
-bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
-  assert(ref != NULL, "invariant");
-  assert(UseCompressedOops, "sanity");
-  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
-  oop p = oopDesc::load_decode_heap_oop(ref);
-  assert(_g1h->is_in_g1_reserved(p),
-         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
-  return true;
-}
-
-bool G1ParScanThreadState::verify_ref(oop* ref) const {
-  assert(ref != NULL, "invariant");
-  if (has_partial_array_mask(ref)) {
-    // Must be in the collection set--it's already been copied.
-    oop p = clear_partial_array_mask(ref);
-    assert(_g1h->obj_in_cs(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
-  } else {
-    oop p = oopDesc::load_decode_heap_oop(ref);
-    assert(_g1h->is_in_g1_reserved(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
-  }
-  return true;
-}
-
-bool G1ParScanThreadState::verify_task(StarTask ref) const {
-  if (ref.is_narrow()) {
-    return verify_ref((narrowOop*) ref);
-  } else {
-    return verify_ref((oop*) ref);
-  }
-}
-#endif // ASSERT
-
-void G1ParScanThreadState::trim_queue() {
-  assert(_evac_failure_cl != NULL, "not set");
-
-  StarTask ref;
-  do {
-    // Drain the overflow stack first, so other threads can steal.
-    while (refs()->pop_overflow(ref)) {
-      deal_with_reference(ref);
-    }
-
-    while (refs()->pop_local(ref)) {
-      deal_with_reference(ref);
-    }
-  } while (!refs()->is_empty());
-}
-
-G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
-                                     G1ParScanThreadState* par_scan_state) :
-  _g1(g1), _par_scan_state(par_scan_state),
-  _worker_id(par_scan_state->queue_num()) { }
-
 void G1ParCopyHelper::mark_object(oop obj) {
   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
@@ -4701,107 +4581,6 @@
   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
 }
 
-oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
-  size_t word_sz = old->size();
-  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
-  // +1 to make the -1 indexes valid...
-  int       young_index = from_region->young_index_in_cset()+1;
-  assert( (from_region->is_young() && young_index >  0) ||
-         (!from_region->is_young() && young_index == 0), "invariant" );
-  G1CollectorPolicy* g1p = _g1h->g1_policy();
-  markOop m = old->mark();
-  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
-                                           : m->age();
-  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
-                                                             word_sz);
-  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
-#ifndef PRODUCT
-  // Should this evacuation fail?
-  if (_g1h->evacuation_should_fail()) {
-    if (obj_ptr != NULL) {
-      undo_allocation(alloc_purpose, obj_ptr, word_sz);
-      obj_ptr = NULL;
-    }
-  }
-#endif // !PRODUCT
-
-  if (obj_ptr == NULL) {
-    // This will either forward-to-self, or detect that someone else has
-    // installed a forwarding pointer.
-    return _g1h->handle_evacuation_failure_par(this, old);
-  }
-
-  oop obj = oop(obj_ptr);
-
-  // We're going to allocate linearly, so might as well prefetch ahead.
-  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
-
-  oop forward_ptr = old->forward_to_atomic(obj);
-  if (forward_ptr == NULL) {
-    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
-
-    // alloc_purpose is just a hint to allocate() above, recheck the type of region
-    // we actually allocated from and update alloc_purpose accordingly
-    HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
-    alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
-
-    if (g1p->track_object_age(alloc_purpose)) {
-      // We could simply do obj->incr_age(). However, this causes a
-      // performance issue. obj->incr_age() will first check whether
-      // the object has a displaced mark by checking its mark word;
-      // getting the mark word from the new location of the object
-      // stalls. So, given that we already have the mark word and we
-      // are about to install it anyway, it's better to increase the
-      // age on the mark word, when the object does not have a
-      // displaced mark word. We're not expecting many objects to have
-      // a displaced marked word, so that case is not optimized
-      // further (it could be...) and we simply call obj->incr_age().
-
-      if (m->has_displaced_mark_helper()) {
-        // in this case, we have to install the mark word first,
-        // otherwise obj looks to be forwarded (the old mark word,
-        // which contains the forward pointer, was copied)
-        obj->set_mark(m);
-        obj->incr_age();
-      } else {
-        m = m->incr_age();
-        obj->set_mark(m);
-      }
-      age_table()->add(obj, word_sz);
-    } else {
-      obj->set_mark(m);
-    }
-
-    if (G1StringDedup::is_enabled()) {
-      G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
-                                             to_region->is_young(),
-                                             queue_num(),
-                                             obj);
-    }
-
-    size_t* surv_young_words = surviving_young_words();
-    surv_young_words[young_index] += word_sz;
-
-    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
-      // We keep track of the next start index in the length field of
-      // the to-space object. The actual length can be found in the
-      // length field of the from-space object.
-      arrayOop(obj)->set_length(0);
-      oop* old_p = set_partial_array_mask(old);
-      push_on_queue(old_p);
-    } else {
-      // No point in using the slower heap_region_containing() method,
-      // given that we know obj is in the heap.
-      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
-      obj->oop_iterate_backwards(&_scanner);
-    }
-  } else {
-    undo_allocation(alloc_purpose, obj_ptr, word_sz);
-    obj = forward_ptr;
-  }
-  return obj;
-}
-
 template <class T>
 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
@@ -4809,7 +4588,7 @@
   }
 }
 
-template <G1Barrier barrier, bool do_mark_object>
+template <G1Barrier barrier, G1Mark do_mark_object>
 template <class T>
 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
@@ -4831,7 +4610,7 @@
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
-    if (do_mark_object && forwardee != obj) {
+    if (do_mark_object != G1MarkNone && forwardee != obj) {
       // If the object is self-forwarded we don't need to explicitly
       // mark it, the evacuation failure protocol will do so.
       mark_forwarded_object(obj, forwardee);
@@ -4842,9 +4621,8 @@
     }
   } else {
     // The object is not in collection set. If we're a root scanning
-    // closure during an initial mark pause (i.e. do_mark_object will
-    // be true) then attempt to mark the object.
-    if (do_mark_object) {
+    // closure during an initial mark pause then attempt to mark the object.
+    if (do_mark_object == G1MarkFromRoot) {
       mark_object(obj);
     }
   }
@@ -4854,8 +4632,8 @@
   }
 }
 
-template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
-template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
+template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
+template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
 
 class G1ParEvacuateFollowersClosure : public VoidClosure {
 protected:
@@ -4891,24 +4669,10 @@
 }
 
 void G1ParEvacuateFollowersClosure::do_void() {
-  StarTask stolen_task;
   G1ParScanThreadState* const pss = par_scan_state();
   pss->trim_queue();
-
   do {
-    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
-      assert(pss->verify_task(stolen_task), "sanity");
-      if (stolen_task.is_narrow()) {
-        pss->deal_with_reference((narrowOop*) stolen_task);
-      } else {
-        pss->deal_with_reference((oop*) stolen_task);
-      }
-
-      // We've just processed a reference and we might have made
-      // available new entries on the queues. So we have to make sure
-      // we drain the queues as necessary.
-      pss->trim_queue();
-    }
+    pss->steal_and_trim_queue(queues());
   } while (!offer_termination());
 }
 
@@ -4954,8 +4718,7 @@
   }
 
 public:
-  G1ParTask(G1CollectedHeap* g1h,
-            RefToScanQueueSet *task_queues)
+  G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
     : AbstractGangTask("G1 collection"),
       _g1h(g1h),
       _queues(task_queues),
@@ -4983,6 +4746,51 @@
     _n_workers = active_workers;
   }
 
+  // Helps out with CLD processing.
+  //
+  // During InitialMark we need to:
+  // 1) Scavenge all CLDs for the young GC.
+  // 2) Mark all objects directly reachable from strong CLDs.
+  template <G1Mark do_mark_object>
+  class G1CLDClosure : public CLDClosure {
+    G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
+    G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
+    G1KlassScanClosure                                _klass_in_cld_closure;
+    bool                                              _claim;
+
+   public:
+    G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
+                 bool only_young, bool claim)
+        : _oop_closure(oop_closure),
+          _oop_in_klass_closure(oop_closure->g1(),
+                                oop_closure->pss(),
+                                oop_closure->rp()),
+          _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
+          _claim(claim) {
+
+    }
+
+    void do_cld(ClassLoaderData* cld) {
+      cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
+    }
+  };
+
+  class G1CodeBlobClosure: public CodeBlobClosure {
+    OopClosure* _f;
+
+   public:
+    G1CodeBlobClosure(OopClosure* f) : _f(f) {}
+    void do_code_blob(CodeBlob* blob) {
+      nmethod* that = blob->as_nmethod_or_null();
+      if (that != NULL) {
+        if (!that->test_set_oops_do_mark()) {
+          that->oops_do(_f);
+          that->fix_oop_relocations();
+        }
+      }
+    }
+  };
+
   void work(uint worker_id) {
     if (worker_id >= _n_workers) return;  // no work needed this round
 
@@ -5000,40 +4808,62 @@
 
       pss.set_evac_failure_closure(&evac_failure_cl);
 
-      G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
-      G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
-
-      G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
-      G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
-
-      bool only_young                 = _g1h->g1_policy()->gcs_are_young();
-      G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
-      G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
-
-      OopClosure*                    scan_root_cl = &only_scan_root_cl;
-      G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
+      bool only_young = _g1h->g1_policy()->gcs_are_young();
+
+      // Non-IM young GC.
+      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
+      G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
+                                                                               only_young, // Only process dirty klasses.
+                                                                               false);     // No need to claim CLDs.
+      // IM young GC.
+      //    Strong roots closures.
+      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
+      G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
+                                                                               false, // Process all klasses.
+                                                                               true); // Need to claim CLDs.
+      //    Weak roots closures.
+      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
+      G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
+                                                                                    false, // Process all klasses.
+                                                                                    true); // Need to claim CLDs.
+
+      G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
+      G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
+      // IM Weak code roots are handled later.
+
+      OopClosure* strong_root_cl;
+      OopClosure* weak_root_cl;
+      CLDClosure* strong_cld_cl;
+      CLDClosure* weak_cld_cl;
+      CodeBlobClosure* strong_code_cl;
 
       if (_g1h->g1_policy()->during_initial_mark_pause()) {
         // We also need to mark copied objects.
-        scan_root_cl = &scan_mark_root_cl;
-        scan_klasses_cl = &scan_mark_klasses_cl_s;
+        strong_root_cl = &scan_mark_root_cl;
+        weak_root_cl   = &scan_mark_weak_root_cl;
+        strong_cld_cl  = &scan_mark_cld_cl;
+        weak_cld_cl    = &scan_mark_weak_cld_cl;
+        strong_code_cl = &scan_mark_code_cl;
+      } else {
+        strong_root_cl = &scan_only_root_cl;
+        weak_root_cl   = &scan_only_root_cl;
+        strong_cld_cl  = &scan_only_cld_cl;
+        weak_cld_cl    = &scan_only_cld_cl;
+        strong_code_cl = &scan_only_code_cl;
       }
 
-      G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
-
-      // Don't scan the scavengable methods in the code cache as part
-      // of strong root scanning. The code roots that point into a
-      // region in the collection set are scanned when we scan the
-      // region's RSet.
-      int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
+
+      G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
 
       pss.start_strong_roots();
-      _g1h->g1_process_strong_roots(/* is scavenging */ true,
-                                    SharedHeap::ScanningOption(so),
-                                    scan_root_cl,
-                                    &push_heap_rs_cl,
-                                    scan_klasses_cl,
-                                    worker_id);
+      _g1h->g1_process_roots(strong_root_cl,
+                             weak_root_cl,
+                             &push_heap_rs_cl,
+                             strong_cld_cl,
+                             weak_cld_cl,
+                             strong_code_cl,
+                             worker_id);
+
       pss.end_strong_roots();
 
       {
@@ -5053,7 +4883,7 @@
         pss.print_termination_stats(worker_id);
       }
 
-      assert(pss.refs()->is_empty(), "should be empty");
+      assert(pss.queue_is_empty(), "should be empty");
 
       // Close the inner scope so that the ResourceMark and HandleMark
       // destructors are executed here and are included as part of the
@@ -5071,24 +4901,31 @@
 
 void
 G1CollectedHeap::
-g1_process_strong_roots(bool is_scavenging,
-                        ScanningOption so,
-                        OopClosure* scan_non_heap_roots,
-                        OopsInHeapRegionClosure* scan_rs,
-                        G1KlassScanClosure* scan_klasses,
-                        uint worker_i) {
-
-  // First scan the strong roots
+g1_process_roots(OopClosure* scan_non_heap_roots,
+                 OopClosure* scan_non_heap_weak_roots,
+                 OopsInHeapRegionClosure* scan_rs,
+                 CLDClosure* scan_strong_clds,
+                 CLDClosure* scan_weak_clds,
+                 CodeBlobClosure* scan_strong_code,
+                 uint worker_i) {
+
+  // First scan the shared roots.
   double ext_roots_start = os::elapsedTime();
   double closure_app_time_sec = 0.0;
 
+  bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
+
   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
-
-  process_strong_roots(false, // no scoping; this is parallel code
-                       so,
-                       &buf_scan_non_heap_roots,
-                       scan_klasses
-                       );
+  BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
+
+  process_roots(false, // no scoping; this is parallel code
+                SharedHeap::SO_None,
+                &buf_scan_non_heap_roots,
+                &buf_scan_non_heap_weak_roots,
+                scan_strong_clds,
+                // Initial Mark handles the weak CLDs separately.
+                (during_im ? NULL : scan_weak_clds),
+                scan_strong_code);
 
   // Now the CM ref_processor roots.
   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
@@ -5099,10 +4936,21 @@
     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
   }
 
+  if (during_im) {
+    // Barrier to make sure all workers passed
+    // the strong CLD and strong nmethods phases.
+    active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
+
+    // Now take the complement of the strong CLDs.
+    ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
+  }
+
   // Finish up any enqueued closure apps (attributed as object copy time).
   buf_scan_non_heap_roots.done();
-
-  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
+  buf_scan_non_heap_weak_roots.done();
+
+  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
+      + buf_scan_non_heap_weak_roots.closure_app_seconds();
 
   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
 
@@ -5126,22 +4974,10 @@
   }
   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
 
-  // If this is an initial mark pause, and we're not scanning
-  // the entire code cache, we need to mark the oops in the
-  // strong code root lists for the regions that are not in
-  // the collection set.
-  // Note all threads participate in this set of root tasks.
-  double mark_strong_code_roots_ms = 0.0;
-  if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) {
-    double mark_strong_roots_start = os::elapsedTime();
-    mark_strong_code_roots(worker_i);
-    mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
-  }
-  g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
-
   // Now scan the complement of the collection set.
-  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
-  g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
+  MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations);
+
+  g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
 
   _process_strong_tasks->all_tasks_completed();
 }
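
Editor's note: g1_process_roots wraps both root closures in BufferingOopClosure, so root locations are buffered during scanning and the wrapped closure is applied later; closure_app_seconds() from both buffers is then attributed to object-copy time, as the hunk above shows. A rough standalone sketch of that buffer-then-apply-and-time pattern (the class name and the std::function callback are illustrative, not the VM's types):

    #include <chrono>
    #include <cstdio>
    #include <functional>
    #include <vector>

    // Buffers pointers cheaply and times only the application of the wrapped closure.
    class BufferingClosure {
      std::function<void(int*)> _wrapped;
      std::vector<int*> _buffer;
      double _app_seconds = 0.0;
    public:
      explicit BufferingClosure(std::function<void(int*)> wrapped) : _wrapped(std::move(wrapped)) {}
      void do_ptr(int* p) { _buffer.push_back(p); }   // root scanning: just record the location
      void done() {                                   // apply the closure to everything, timed
        auto start = std::chrono::steady_clock::now();
        for (int* p : _buffer) _wrapped(p);
        _buffer.clear();
        _app_seconds += std::chrono::duration<double>(std::chrono::steady_clock::now() - start).count();
      }
      double closure_app_seconds() const { return _app_seconds; }
    };

    int main() {
      int roots[3] = {1, 2, 3};
      BufferingClosure buf([](int* p) { *p += 10; }); // stands in for the copy closure
      for (int& r : roots) buf.do_ptr(&r);            // "root scanning" phase
      buf.done();                                     // "object copy" phase, timed separately
      std::printf("applied in %f s, roots now %d %d %d\n",
                  buf.closure_app_seconds(), roots[0], roots[1], roots[2]);
      return 0;
    }
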
@@ -5163,7 +4999,8 @@
   bool _do_in_parallel;
 public:
   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
-    AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
+    AbstractGangTask("String/Symbol Unlinking"),
+    _is_alive(is_alive),
     _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
@@ -5185,6 +5022,14 @@
     guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
               err_msg("claim value %d after unlink less than initial symbol table size %d",
                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
+
+    if (G1TraceStringSymbolTableScrubbing) {
+      gclog_or_tty->print_cr("Cleaned string and symbol table, "
+                             "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
+                             "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
+                             strings_processed(), strings_removed(),
+                             symbols_processed(), symbols_removed());
+    }
   }
 
   void work(uint worker_id) {
@@ -5220,12 +5065,279 @@
   size_t symbols_removed()   const { return (size_t)_symbols_removed; }
 };
 
-void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
-                                                     bool process_strings, bool process_symbols) {
+class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
+private:
+  static Monitor* _lock;
+
+  BoolObjectClosure* const _is_alive;
+  const bool               _unloading_occurred;
+  const uint               _num_workers;
+
+  // Variables used to claim nmethods.
+  nmethod* _first_nmethod;
+  volatile nmethod* _claimed_nmethod;
+
+  // The list of nmethods that need to be processed by the second pass.
+  volatile nmethod* _postponed_list;
+  volatile uint     _num_entered_barrier;
+
+ public:
+  G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
+      _is_alive(is_alive),
+      _unloading_occurred(unloading_occurred),
+      _num_workers(num_workers),
+      _first_nmethod(NULL),
+      _claimed_nmethod(NULL),
+      _postponed_list(NULL),
+      _num_entered_barrier(0)
+  {
+    nmethod::increase_unloading_clock();
+    _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
+    _claimed_nmethod = (volatile nmethod*)_first_nmethod;
+  }
+
+  ~G1CodeCacheUnloadingTask() {
+    CodeCache::verify_clean_inline_caches();
+
+    CodeCache::set_needs_cache_clean(false);
+    guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
+
+    CodeCache::verify_icholder_relocations();
+  }
+
+ private:
+  void add_to_postponed_list(nmethod* nm) {
+      nmethod* old;
+      do {
+        old = (nmethod*)_postponed_list;
+        nm->set_unloading_next(old);
+      } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
+  }
+
+  void clean_nmethod(nmethod* nm) {
+    bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
+
+    if (postponed) {
+      // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
+      add_to_postponed_list(nm);
+    }
+
+    // Mark that this nmethod has been cleaned/unloaded.
+    // After this call, it will be safe to ask if this nmethod was unloaded or not.
+    nm->set_unloading_clock(nmethod::global_unloading_clock());
+  }
+
+  void clean_nmethod_postponed(nmethod* nm) {
+    nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
+  }
+
+  static const int MaxClaimNmethods = 16;
+
+  void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
+    nmethod* first;
+    nmethod* last;
+
+    do {
+      *num_claimed_nmethods = 0;
+
+      first = last = (nmethod*)_claimed_nmethod;
+
+      if (first != NULL) {
+        for (int i = 0; i < MaxClaimNmethods; i++) {
+          last = CodeCache::alive_nmethod(CodeCache::next(last));
+
+          if (last == NULL) {
+            break;
+          }
+
+          claimed_nmethods[i] = last;
+          (*num_claimed_nmethods)++;
+        }
+      }
+
+    } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
+  }
+
+  nmethod* claim_postponed_nmethod() {
+    nmethod* claim;
+    nmethod* next;
+
+    do {
+      claim = (nmethod*)_postponed_list;
+      if (claim == NULL) {
+        return NULL;
+      }
+
+      next = claim->unloading_next();
+
+    } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
+
+    return claim;
+  }
+
+ public:
+  // Mark that we're done with the first pass of nmethod cleaning.
+  void barrier_mark(uint worker_id) {
+    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+    _num_entered_barrier++;
+    if (_num_entered_barrier == _num_workers) {
+      ml.notify_all();
+    }
+  }
+
+  // See if we have to wait for the other workers to
+  // finish their first-pass nmethod cleaning work.
+  void barrier_wait(uint worker_id) {
+    if (_num_entered_barrier < _num_workers) {
+      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+      while (_num_entered_barrier < _num_workers) {
+          ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
+      }
+    }
+  }
+
+  // Cleaning and unloading of nmethods. Some work has to be postponed
+  // to the second pass, when we know which nmethods survive.
+  void work_first_pass(uint worker_id) {
+    // The first nmethod is claimed by the first worker.
+    if (worker_id == 0 && _first_nmethod != NULL) {
+      clean_nmethod(_first_nmethod);
+      _first_nmethod = NULL;
+    }
+
+    int num_claimed_nmethods;
+    nmethod* claimed_nmethods[MaxClaimNmethods];
+
+    while (true) {
+      claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
+
+      if (num_claimed_nmethods == 0) {
+        break;
+      }
+
+      for (int i = 0; i < num_claimed_nmethods; i++) {
+        clean_nmethod(claimed_nmethods[i]);
+      }
+    }
+  }
+
+  void work_second_pass(uint worker_id) {
+    nmethod* nm;
+    // Take care of postponed nmethods.
+    while ((nm = claim_postponed_nmethod()) != NULL) {
+      clean_nmethod_postponed(nm);
+    }
+  }
+};
+
+Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
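
Editor's note: claim_nmethods above lets each worker grab a batch of up to MaxClaimNmethods entries from a shared cursor with a single compare-and-swap: the worker tentatively walks ahead from the current cursor, then publishes the new position only if no other worker moved it in the meantime. A small standalone sketch of that batched CAS claim over a singly linked list, using std::atomic (the Node type and sizes are hypothetical, not the VM's nmethod list):

    #include <atomic>
    #include <cstdio>

    struct Node { int id; Node* next; };

    static std::atomic<Node*> g_cursor{nullptr};
    static const int MaxClaim = 3;            // plays the role of MaxClaimNmethods

    // Claim up to MaxClaim nodes starting just after the shared cursor.
    int claim_batch(Node** out) {
      Node* first;
      Node* last;
      int n;
      do {
        n = 0;
        first = last = g_cursor.load();
        if (first != nullptr) {
          for (int i = 0; i < MaxClaim; i++) {
            last = last->next;                // walk ahead tentatively
            if (last == nullptr) break;
            out[n++] = last;
          }
        }
        // Publish the new cursor only if no other worker advanced it meanwhile.
      } while (!g_cursor.compare_exchange_weak(first, last));
      return n;
    }

    int main() {
      Node nodes[7];
      for (int i = 0; i < 7; i++) { nodes[i].id = i; nodes[i].next = (i + 1 < 7) ? &nodes[i + 1] : nullptr; }
      // nodes[0] plays the role of _first_nmethod, which the real code hands to worker 0 separately.
      g_cursor.store(&nodes[0]);
      Node* batch[MaxClaim];
      int n;
      while ((n = claim_batch(batch)) > 0) {
        for (int i = 0; i < n; i++) std::printf("claimed node %d\n", batch[i]->id);
      }
      return 0;
    }
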
+
+class G1KlassCleaningTask : public StackObj {
+  BoolObjectClosure*                      _is_alive;
+  volatile jint                           _clean_klass_tree_claimed;
+  ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
+
+ public:
+  G1KlassCleaningTask(BoolObjectClosure* is_alive) :
+      _is_alive(is_alive),
+      _clean_klass_tree_claimed(0),
+      _klass_iterator() {
+  }
+
+ private:
+  bool claim_clean_klass_tree_task() {
+    if (_clean_klass_tree_claimed) {
+      return false;
+    }
+
+    return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
+  }
+
+  InstanceKlass* claim_next_klass() {
+    Klass* klass;
+    do {
+      klass = _klass_iterator.next_klass();
+    } while (klass != NULL && !klass->oop_is_instance());
+
+    return (InstanceKlass*)klass;
+  }
+
+public:
+
+  void clean_klass(InstanceKlass* ik) {
+    ik->clean_implementors_list(_is_alive);
+    ik->clean_method_data(_is_alive);
+
+    // G1 specific cleanup work that has
+    // been moved here to be done in parallel.
+    ik->clean_dependent_nmethods();
+  }
+
+  void work() {
+    ResourceMark rm;
+
+    // One worker will clean the subklass/sibling klass tree.
+    if (claim_clean_klass_tree_task()) {
+      Klass::clean_subklass_tree(_is_alive);
+    }
+
+    // All workers will help clean the classes.
+    InstanceKlass* klass;
+    while ((klass = claim_next_klass()) != NULL) {
+      clean_klass(klass);
+    }
+  }
+};
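
Editor's note: claim_clean_klass_tree_task above uses an atomic compare-and-swap on a flag so exactly one worker performs the subklass/sibling tree walk while the rest fall through to the shared per-klass loop. A tiny standalone sketch of that one-shot claim, with std::atomic<int> standing in for Atomic::cmpxchg (names are illustrative):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    static std::atomic<int> g_tree_task_claimed{0};

    // Returns true for exactly one caller; all others get false.
    bool claim_tree_task() {
      if (g_tree_task_claimed.load() != 0) {
        return false;                          // fast path: already claimed
      }
      int expected = 0;
      return g_tree_task_claimed.compare_exchange_strong(expected, 1);
    }

    int main() {
      std::vector<std::thread> workers;
      for (int w = 0; w < 4; w++) {
        workers.emplace_back([w] {
          if (claim_tree_task()) {
            std::printf("worker %d cleans the klass tree\n", w);   // printed exactly once
          }
          // All workers then continue with the shared per-klass work.
        });
      }
      for (auto& t : workers) t.join();
      return 0;
    }
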
+
+// To minimize the remark pause times, the tasks below are done in parallel.
+class G1ParallelCleaningTask : public AbstractGangTask {
+private:
+  G1StringSymbolTableUnlinkTask _string_symbol_task;
+  G1CodeCacheUnloadingTask      _code_cache_task;
+  G1KlassCleaningTask           _klass_cleaning_task;
+
+public:
+  // The constructor is run in the VMThread.
+  G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
+      AbstractGangTask("Parallel Cleaning"),
+      _string_symbol_task(is_alive, process_strings, process_symbols),
+      _code_cache_task(num_workers, is_alive, unloading_occurred),
+      _klass_cleaning_task(is_alive) {
+  }
+
+  // The parallel work done by all worker threads.
+  void work(uint worker_id) {
+    // Do first pass of code cache cleaning.
+    _code_cache_task.work_first_pass(worker_id);
+
+    // Let the threads mark that the first pass is done.
+    _code_cache_task.barrier_mark(worker_id);
+
+    // Clean the Strings and Symbols.
+    _string_symbol_task.work(worker_id);
+
+    // Wait for all workers to finish the first code cache cleaning pass.
+    _code_cache_task.barrier_wait(worker_id);
+
+    // Do the second code cache cleaning work, which relies on
+    // the liveness information gathered during the first pass.
+    _code_cache_task.work_second_pass(worker_id);
+
+    // Clean all klasses that were not unloaded.
+    _klass_cleaning_task.work();
+  }
+};
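
Editor's note: the ordering in work() above is deliberate: each worker signals that its first code-cache pass is done (barrier_mark), then does the independent string/symbol work before blocking in barrier_wait, so waiting for slower workers overlaps with useful work. A minimal standalone sketch of that mark/overlap/wait shape using std::mutex and std::condition_variable (the phase bodies are placeholders, not the real cleaning work):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    static const unsigned kWorkers = 4;
    static std::mutex              g_lock;
    static std::condition_variable g_cv;
    static unsigned                g_entered = 0;

    void barrier_mark() {
      std::lock_guard<std::mutex> ml(g_lock);
      if (++g_entered == kWorkers) g_cv.notify_all();
    }

    void barrier_wait() {
      std::unique_lock<std::mutex> ml(g_lock);
      g_cv.wait(ml, [] { return g_entered == kWorkers; });
    }

    void worker(unsigned id) {
      std::printf("worker %u: first code cache pass\n", id);
      barrier_mark();                                            // announce the first pass is done
      std::printf("worker %u: string/symbol unlinking\n", id);   // independent work hides the wait
      barrier_wait();                                            // everyone finished the first pass
      std::printf("worker %u: second code cache pass\n", id);
    }

    int main() {
      std::vector<std::thread> ts;
      for (unsigned i = 0; i < kWorkers; i++) ts.emplace_back(worker, i);
      for (auto& t : ts) t.join();
      return 0;
    }
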
+
+
+void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
+                                        bool process_strings,
+                                        bool process_symbols,
+                                        bool class_unloading_occurred) {
   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                   _g1h->workers()->active_workers() : 1);
-
-  G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
+                    workers()->active_workers() : 1);
+
+  G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
+                                        n_workers, class_unloading_occurred);
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     set_par_threads(n_workers);
     workers()->run_task(&g1_unlink_task);
@@ -5233,12 +5345,21 @@
   } else {
     g1_unlink_task.work(0);
   }
-  if (G1TraceStringSymbolTableScrubbing) {
-    gclog_or_tty->print_cr("Cleaned string and symbol table, "
-                           "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
-                           "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
-                           g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
-                           g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
+}
+
+void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
+                                                     bool process_strings, bool process_symbols) {
+  {
+    uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                     _g1h->workers()->active_workers() : 1);
+    G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      set_par_threads(n_workers);
+      workers()->run_task(&g1_unlink_task);
+      set_par_threads(0);
+    } else {
+      g1_unlink_task.work(0);
+    }
   }
 
   if (G1StringDedup::is_enabled()) {
@@ -5577,8 +5698,7 @@
 
     pss.set_evac_failure_closure(&evac_failure_cl);
 
-    assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
-
+    assert(pss.queue_is_empty(), "both queue and overflow should be empty");
 
     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
 
@@ -5632,7 +5752,7 @@
     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
     drain_queue.do_void();
     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
-    assert(pss.refs()->is_empty(), "should be");
+    assert(pss.queue_is_empty(), "should be");
   }
 };
 
@@ -5699,7 +5819,7 @@
 
   pss.set_evac_failure_closure(&evac_failure_cl);
 
-  assert(pss.refs()->is_empty(), "pre-condition");
+  assert(pss.queue_is_empty(), "pre-condition");
 
   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
 
@@ -5747,7 +5867,7 @@
   _gc_tracer_stw->report_gc_reference_stats(stats);
 
   // We have completed copying any necessary live referent objects.
-  assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
+  assert(pss.queue_is_empty(), "both queue and overflow should be empty");
 
   double ref_proc_time = os::elapsedTime() - ref_proc_start;
   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
@@ -5832,6 +5952,10 @@
 
   {
     StrongRootsScope srs(this);
+    // InitialMark needs claim bits to keep track of the marked-through CLDs.
+    if (g1_policy()->during_initial_mark_pause()) {
+      ClassLoaderDataGraph::clear_claimed_marks();
+    }
 
     if (G1CollectedHeap::use_parallel_gc_threads()) {
       // The individual threads will set their evac-failure closures.
@@ -6603,7 +6727,7 @@
       // We really only need to do this for old regions given that we
       // should never scan survivors. But it doesn't hurt to do it
       // for survivors too.
-      new_alloc_region->set_saved_mark();
+      new_alloc_region->record_top_and_timestamp();
       if (survivor) {
         new_alloc_region->set_survivor();
         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
@@ -6867,106 +6991,6 @@
   g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
 }
 
-// Mark all the code roots that point into regions *not* in the
-// collection set.
-//
-// Note we do not want to use a "marking" CodeBlobToOopClosure while
-// walking the the code roots lists of regions not in the collection
-// set. Suppose we have an nmethod (M) that points to objects in two
-// separate regions - one in the collection set (R1) and one not (R2).
-// Using a "marking" CodeBlobToOopClosure here would result in "marking"
-// nmethod M when walking the code roots for R1. When we come to scan
-// the code roots for R2, we would see that M is already marked and it
-// would be skipped and the objects in R2 that are referenced from M
-// would not be evacuated.
-
-class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
-
-  class MarkStrongCodeRootOopClosure: public OopClosure {
-    ConcurrentMark* _cm;
-    HeapRegion* _hr;
-    uint _worker_id;
-
-    template <class T> void do_oop_work(T* p) {
-      T heap_oop = oopDesc::load_heap_oop(p);
-      if (!oopDesc::is_null(heap_oop)) {
-        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-        // Only mark objects in the region (which is assumed
-        // to be not in the collection set).
-        if (_hr->is_in(obj)) {
-          _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
-        }
-      }
-    }
-
-  public:
-    MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
-      _cm(cm), _hr(hr), _worker_id(worker_id) {
-      assert(!_hr->in_collection_set(), "sanity");
-    }
-
-    void do_oop(narrowOop* p) { do_oop_work(p); }
-    void do_oop(oop* p)       { do_oop_work(p); }
-  };
-
-  MarkStrongCodeRootOopClosure _oop_cl;
-
-public:
-  MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
-    _oop_cl(cm, hr, worker_id) {}
-
-  void do_code_blob(CodeBlob* cb) {
-    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
-    if (nm != NULL) {
-      nm->oops_do(&_oop_cl);
-    }
-  }
-};
-
-class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  uint _worker_id;
-
-public:
-  MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
-    _g1h(g1h), _worker_id(worker_id) {}
-
-  bool doHeapRegion(HeapRegion *hr) {
-    HeapRegionRemSet* hrrs = hr->rem_set();
-    if (hr->continuesHumongous()) {
-      // Code roots should never be attached to a continuation of a humongous region
-      assert(hrrs->strong_code_roots_list_length() == 0,
-             err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
-                     " starting at "HR_FORMAT", but has "SIZE_FORMAT,
-                     HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
-                     hrrs->strong_code_roots_list_length()));
-      return false;
-    }
-
-    if (hr->in_collection_set()) {
-      // Don't mark code roots into regions in the collection set here.
-      // They will be marked when we scan them.
-      return false;
-    }
-
-    MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
-    hr->strong_code_roots_do(&cb_cl);
-    return false;
-  }
-};
-
-void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
-  MarkStrongCodeRootsHRClosure cl(this, worker_id);
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
-    heap_region_par_iterate_chunked(&cl,
-                                    worker_id,
-                                    workers()->active_workers(),
-                                    HeapRegion::ParMarkRootClaimValue);
-  } else {
-    heap_region_iterate(&cl);
-  }
-}
-
 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
   G1CollectedHeap* _g1h;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -31,7 +31,6 @@
 #include "gc_implementation/g1/g1BiasedArray.hpp"
 #include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
-#include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
@@ -211,6 +210,7 @@
 class RefineCardTableEntryClosure;
 
 class G1CollectedHeap : public SharedHeap {
+  friend class VM_CollectForMetadataAllocation;
   friend class VM_G1CollectForAllocation;
   friend class VM_G1CollectFull;
   friend class VM_G1IncCollectionPause;
@@ -220,7 +220,7 @@
   friend class OldGCAllocRegion;
 
   // Closures used in implementation.
-  template <G1Barrier barrier, bool do_mark_object>
+  template <G1Barrier barrier, G1Mark do_mark_object>
   friend class G1ParCopyClosure;
   friend class G1IsAliveClosure;
   friend class G1EvacuateFollowersClosure;
@@ -347,6 +347,9 @@
   // It initializes the GC alloc regions at the start of a GC.
   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 
+  // Set up the retained old gc alloc region as the current old gc alloc region.
+  void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
+
   // It releases the GC alloc regions at the end of a GC.
   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 
@@ -828,12 +831,13 @@
   // param is for use with parallel roots processing, and should be
   // the "i" of the calling parallel worker thread's work(i) function.
   // In the sequential case this param will be ignored.
-  void g1_process_strong_roots(bool is_scavenging,
-                               ScanningOption so,
-                               OopClosure* scan_non_heap_roots,
-                               OopsInHeapRegionClosure* scan_rs,
-                               G1KlassScanClosure* scan_klasses,
-                               uint worker_i);
+  void g1_process_roots(OopClosure* scan_non_heap_roots,
+                        OopClosure* scan_non_heap_weak_roots,
+                        OopsInHeapRegionClosure* scan_rs,
+                        CLDClosure* scan_strong_clds,
+                        CLDClosure* scan_weak_clds,
+                        CodeBlobClosure* scan_strong_code,
+                        uint worker_i);
 
   // Notifies all the necessary spaces that the committed space has
   // been updated (either expanded or shrunk). It should be called
@@ -1026,7 +1030,7 @@
   // of G1CollectedHeap::_gc_time_stamp.
   unsigned int* _worker_cset_start_region_time_stamp;
 
-  enum G1H_process_strong_roots_tasks {
+  enum G1H_process_roots_tasks {
     G1H_PS_filter_satb_buffers,
     G1H_PS_refProcessor_oops_do,
     // Leave this one last.
@@ -1608,10 +1612,6 @@
   // Free up superfluous code root memory.
   void purge_code_root_memory();
 
-  // During an initial mark pause, mark all the code roots that
-  // point into regions *not* in the collection set.
-  void mark_strong_code_roots(uint worker_id);
-
   // Rebuild the strong code root lists for each region
   // after a full GC.
   void rebuild_strong_code_roots();
@@ -1620,6 +1620,9 @@
   // in symbol table, possibly in parallel.
   void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
 
+  // Parallel phase of unloading/cleaning after G1 concurrent mark.
+  void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred);
+
   // Redirty logged cards in the refinement queue.
   void redirty_logged_cards();
   // Verification
@@ -1715,256 +1718,4 @@
   }
 };
 
-class G1ParScanThreadState : public StackObj {
-protected:
-  G1CollectedHeap* _g1h;
-  RefToScanQueue*  _refs;
-  DirtyCardQueue   _dcq;
-  G1SATBCardTableModRefBS* _ct_bs;
-  G1RemSet* _g1_rem;
-
-  G1ParGCAllocBuffer  _surviving_alloc_buffer;
-  G1ParGCAllocBuffer  _tenured_alloc_buffer;
-  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
-  ageTable            _age_table;
-
-  G1ParScanClosure    _scanner;
-
-  size_t           _alloc_buffer_waste;
-  size_t           _undo_waste;
-
-  OopsInHeapRegionClosure*      _evac_failure_cl;
-
-  int  _hash_seed;
-  uint _queue_num;
-
-  size_t _term_attempts;
-
-  double _start;
-  double _start_strong_roots;
-  double _strong_roots_time;
-  double _start_term;
-  double _term_time;
-
-  // Map from young-age-index (0 == not young, 1 is youngest) to
-  // surviving words. base is what we get back from the malloc call
-  size_t* _surviving_young_words_base;
-  // this points into the array, as we use the first few entries for padding
-  size_t* _surviving_young_words;
-
-#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
-
-  void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
-
-  void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
-
-  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
-  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
-
-  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
-
-  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
-    // If the new value of the field points to the same region or
-    // is the to-space, we don't need to include it in the Rset updates.
-    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
-      size_t card_index = ctbs()->index_for(p);
-      // If the card hasn't been added to the buffer, do it.
-      if (ctbs()->mark_card_deferred(card_index)) {
-        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
-      }
-    }
-  }
-
-public:
-  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
-
-  ~G1ParScanThreadState() {
-    retire_alloc_buffers();
-    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
-  }
-
-  RefToScanQueue*   refs()            { return _refs;             }
-  ageTable*         age_table()       { return &_age_table;       }
-
-  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
-    return _alloc_buffers[purpose];
-  }
-
-  size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
-  size_t undo_waste() const                      { return _undo_waste; }
-
-#ifdef ASSERT
-  bool verify_ref(narrowOop* ref) const;
-  bool verify_ref(oop* ref) const;
-  bool verify_task(StarTask ref) const;
-#endif // ASSERT
-
-  template <class T> void push_on_queue(T* ref) {
-    assert(verify_ref(ref), "sanity");
-    refs()->push(ref);
-  }
-
-  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
-
-  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = NULL;
-    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
-    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->retire(false /* end_of_gc */, false /* retain */);
-
-      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
-      if (buf == NULL) return NULL; // Let caller handle allocation failure.
-      // Otherwise.
-      alloc_buf->set_word_size(gclab_word_size);
-      alloc_buf->set_buf(buf);
-
-      obj = alloc_buf->allocate(word_sz);
-      assert(obj != NULL, "buffer was definitely big enough...");
-    } else {
-      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
-    }
-    return obj;
-  }
-
-  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
-    if (obj != NULL) return obj;
-    return allocate_slow(purpose, word_sz);
-  }
-
-  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
-    if (alloc_buffer(purpose)->contains(obj)) {
-      assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
-             "should contain whole object");
-      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-    } else {
-      CollectedHeap::fill_with_object(obj, word_sz);
-      add_to_undo_waste(word_sz);
-    }
-  }
-
-  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
-    _evac_failure_cl = evac_failure_cl;
-  }
-  OopsInHeapRegionClosure* evac_failure_closure() {
-    return _evac_failure_cl;
-  }
-
-  int* hash_seed() { return &_hash_seed; }
-  uint queue_num() { return _queue_num; }
-
-  size_t term_attempts() const  { return _term_attempts; }
-  void note_term_attempt() { _term_attempts++; }
-
-  void start_strong_roots() {
-    _start_strong_roots = os::elapsedTime();
-  }
-  void end_strong_roots() {
-    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
-  }
-  double strong_roots_time() const { return _strong_roots_time; }
-
-  void start_term_time() {
-    note_term_attempt();
-    _start_term = os::elapsedTime();
-  }
-  void end_term_time() {
-    _term_time += (os::elapsedTime() - _start_term);
-  }
-  double term_time() const { return _term_time; }
-
-  double elapsed_time() const {
-    return os::elapsedTime() - _start;
-  }
-
-  static void
-    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
-  void
-    print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
-
-  size_t* surviving_young_words() {
-    // We add on to hide entry 0 which accumulates surviving words for
-    // age -1 regions (i.e. non-young ones)
-    return _surviving_young_words;
-  }
-
-private:
-  void retire_alloc_buffers() {
-    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-      size_t waste = _alloc_buffers[ap]->words_remaining();
-      add_to_alloc_buffer_waste(waste);
-      _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
-                                                 true /* end_of_gc */,
-                                                 false /* retain */);
-    }
-  }
-
-#define G1_PARTIAL_ARRAY_MASK 0x2
-
-  inline bool has_partial_array_mask(oop* ref) const {
-    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
-  }
-
-  // We never encode partial array oops as narrowOop*, so return false immediately.
-  // This allows the compiler to create optimized code when popping references from
-  // the work queue.
-  inline bool has_partial_array_mask(narrowOop* ref) const {
-    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
-    return false;
-  }
-
-  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
-  // We always encode partial arrays as regular oop, to allow the
-  // specialization for has_partial_array_mask() for narrowOops above.
-  // This means that unintentional use of this method with narrowOops are caught
-  // by the compiler.
-  inline oop* set_partial_array_mask(oop obj) const {
-    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
-  }
-
-  inline oop clear_partial_array_mask(oop* ref) const {
-    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
-  }
-
-  inline void do_oop_partial_array(oop* p);
-
-  // This method is applied to the fields of the objects that have just been copied.
-  template <class T> void do_oop_evac(T* p, HeapRegion* from) {
-    assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
-           "Reference should not be NULL here as such are never pushed to the task queue.");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
-
-    // Although we never intentionally push references outside of the collection
-    // set, due to (benign) races in the claim mechanism during RSet scanning more
-    // than one thread might claim the same card. So the same card may be
-    // processed multiple times. So redo this check.
-    if (_g1h->in_cset_fast_test(obj)) {
-      oop forwardee;
-      if (obj->is_forwarded()) {
-        forwardee = obj->forwardee();
-      } else {
-        forwardee = copy_to_survivor_space(obj);
-      }
-      assert(forwardee != NULL, "forwardee should not be NULL");
-      oopDesc::encode_store_heap_oop(p, forwardee);
-    }
-
-    assert(obj != NULL, "Must be");
-    update_rs(from, p, queue_num());
-  }
-public:
-
-  oop copy_to_survivor_space(oop const obj);
-
-  template <class T> inline void deal_with_reference(T* ref_to_scan);
-
-  inline void deal_with_reference(StarTask ref);
-
-public:
-  void trim_queue();
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -29,7 +29,6 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
-#include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
@@ -289,89 +288,4 @@
   return is_obj_ill(obj, heap_region_containing(obj));
 }
 
-template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
-  if (!from->is_survivor()) {
-    _g1_rem->par_write_ref(from, p, tid);
-  }
-}
-
-template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
-  if (G1DeferredRSUpdate) {
-    deferred_rs_update(from, p, tid);
-  } else {
-    immediate_rs_update(from, p, tid);
-  }
-}
-
-
-inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
-  assert(has_partial_array_mask(p), "invariant");
-  oop from_obj = clear_partial_array_mask(p);
-
-  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
-  assert(from_obj->is_objArray(), "must be obj array");
-  objArrayOop from_obj_array = objArrayOop(from_obj);
-  // The from-space object contains the real length.
-  int length                 = from_obj_array->length();
-
-  assert(from_obj->is_forwarded(), "must be forwarded");
-  oop to_obj                 = from_obj->forwardee();
-  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
-  objArrayOop to_obj_array   = objArrayOop(to_obj);
-  // We keep track of the next start index in the length field of the
-  // to-space object.
-  int next_index             = to_obj_array->length();
-  assert(0 <= next_index && next_index < length,
-         err_msg("invariant, next index: %d, length: %d", next_index, length));
-
-  int start                  = next_index;
-  int end                    = length;
-  int remainder              = end - start;
-  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
-  if (remainder > 2 * ParGCArrayScanChunk) {
-    end = start + ParGCArrayScanChunk;
-    to_obj_array->set_length(end);
-    // Push the remainder before we process the range in case another
-    // worker has run out of things to do and can steal it.
-    oop* from_obj_p = set_partial_array_mask(from_obj);
-    push_on_queue(from_obj_p);
-  } else {
-    assert(length == end, "sanity");
-    // We'll process the final range for this object. Restore the length
-    // so that the heap remains parsable in case of evacuation failure.
-    to_obj_array->set_length(end);
-  }
-  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
-  // Process indexes [start,end). It will also process the header
-  // along with the first chunk (i.e., the chunk with start == 0).
-  // Note that at this point the length field of to_obj_array is not
-  // correct given that we are using it to keep track of the next
-  // start index. oop_iterate_range() (thankfully!) ignores the length
-  // field and only relies on the start / end parameters.  It does
-  // however return the size of the object which will be incorrect. So
-  // we have to ignore it even if we wanted to use it.
-  to_obj_array->oop_iterate_range(&_scanner, start, end);
-}
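
Editor's note: do_oop_partial_array, removed here and moved into the new G1ParScanThreadState files elsewhere in this change, scans large object arrays in ParGCArrayScanChunk-sized chunks: the to-space copy's length field temporarily stores the next start index, and the remaining range is pushed back on the queue before the current chunk is scanned so an idle worker can steal it. A standalone sketch of that chunking loop over a plain task stack (sizes and the Task type are illustrative only):

    #include <cstdio>
    #include <vector>

    static const int kChunk = 4;                     // plays the role of ParGCArrayScanChunk

    struct Task { int next_index; };                 // the "length field" reused as the next start index

    int main() {
      const int length = 11;                         // real length of the array being scanned
      std::vector<Task> queue = { {0} };             // initial task: start at index 0

      while (!queue.empty()) {
        Task t = queue.back(); queue.pop_back();
        int start = t.next_index;
        int end   = length;
        if (end - start > 2 * kChunk) {              // don't push a range smaller than one chunk
          end = start + kChunk;
          queue.push_back({end});                    // push the remainder first so it can be "stolen"
        }
        std::printf("scan elements [%d, %d)\n", start, end);
        // ... scan the elements in [start, end) here ...
      }
      return 0;
    }
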
-
-template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
-  if (!has_partial_array_mask(ref_to_scan)) {
-    // Note: we can use "raw" versions of "region_containing" because
-    // "obj_to_scan" is definitely in the heap, and is not in a
-    // humongous region.
-    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-    do_oop_evac(ref_to_scan, r);
-  } else {
-    do_oop_partial_array((oop*)ref_to_scan);
-  }
-}
-
-inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
-  assert(verify_task(ref), "sanity");
-  if (ref.is_narrow()) {
-    deal_with_reference((narrowOop*)ref);
-  } else {
-    deal_with_reference((oop*)ref);
-  }
-}
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -71,6 +71,9 @@
   bool _during_initial_mark;
   bool _during_conc_mark;
   uint _worker_id;
+  HeapWord* _end_of_last_gap;
+  HeapWord* _last_gap_threshold;
+  HeapWord* _last_obj_threshold;
 
 public:
   RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
@@ -83,7 +86,10 @@
     _update_rset_cl(update_rset_cl),
     _during_initial_mark(during_initial_mark),
     _during_conc_mark(during_conc_mark),
-    _worker_id(worker_id) { }
+    _worker_id(worker_id),
+    _end_of_last_gap(hr->bottom()),
+    _last_gap_threshold(hr->bottom()),
+    _last_obj_threshold(hr->bottom()) { }
 
   size_t marked_bytes() { return _marked_bytes; }
 
@@ -107,7 +113,12 @@
     HeapWord* obj_addr = (HeapWord*) obj;
     assert(_hr->is_in(obj_addr), "sanity");
     size_t obj_size = obj->size();
-    _hr->update_bot_for_object(obj_addr, obj_size);
+    HeapWord* obj_end = obj_addr + obj_size;
+
+    if (_end_of_last_gap != obj_addr) {
+      // there was a gap before obj_addr
+      _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
+    }
 
     if (obj->is_forwarded() && obj->forwardee() == obj) {
       // The object failed to move.
@@ -115,7 +126,9 @@
       // We consider all objects that we find self-forwarded to be
       // live. What we'll do is that we'll update the prev marking
       // info so that they are all under PTAMS and explicitly marked.
-      _cm->markPrev(obj);
+      if (!_cm->isPrevMarked(obj)) {
+        _cm->markPrev(obj);
+      }
       if (_during_initial_mark) {
         // For the next marking info we'll only mark the
         // self-forwarded objects explicitly if we are during
@@ -145,13 +158,18 @@
       // remembered set entries missing given that we skipped cards on
       // the collection set. So, we'll recreate such entries now.
       obj->oop_iterate(_update_rset_cl);
-      assert(_cm->isPrevMarked(obj), "Should be marked!");
     } else {
+
       // The object has been either evacuated or is dead. Fill it with a
       // dummy object.
-      MemRegion mr((HeapWord*) obj, obj_size);
+      MemRegion mr(obj_addr, obj_size);
       CollectedHeap::fill_with_object(mr);
+
+      // must nuke all dead objects which we skipped when iterating over the region
+      _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
     }
+    _end_of_last_gap = obj_end;
+    _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
   }
 };
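
Editor's note: the closure above now tracks _end_of_last_gap so that, when the next live object is reached, the skipped dead range has its prev-bitmap marks cleared (and its BOT thresholds updated) on the fly, instead of clearing the whole region's bitmap up front as the removed code below did. A simplified standalone sketch of that gap-walking idea, with a std::vector<bool> standing in for the prev bitmap and word indices standing in for heap addresses (purely illustrative):

    #include <cstdio>
    #include <utility>
    #include <vector>

    int main() {
      const int region_words = 16;
      std::vector<bool> prev_bitmap(region_words, true);         // pretend everything was marked
      // (start, size) pairs of the objects that survive, in address order.
      std::vector<std::pair<int, int>> live = { {0, 3}, {7, 2}, {12, 4} };

      int end_of_last_gap = 0;                                   // mirrors _end_of_last_gap
      for (auto& obj : live) {
        int addr = obj.first;
        int end  = obj.first + obj.second;
        if (end_of_last_gap != addr) {
          // There was a dead gap before this object: clear its prev-bitmap marks.
          for (int w = end_of_last_gap; w < addr; w++) prev_bitmap[w] = false;
          std::printf("cleared gap [%d, %d)\n", end_of_last_gap, addr);
        }
        end_of_last_gap = end;
      }
      // Trailing gap up to the region end, if any.
      for (int w = end_of_last_gap; w < region_words; w++) prev_bitmap[w] = false;
      return 0;
    }
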
 
@@ -182,13 +200,6 @@
                                             during_conc_mark,
                                             _worker_id);
 
-        MemRegion mr(hr->bottom(), hr->end());
-        // We'll recreate the prev marking info so we'll first clear
-        // the prev bitmap range for this region. We never mark any
-        // CSet objects explicitly so the next bitmap range should be
-        // cleared anyway.
-        _cm->clearRangePrevBitmap(mr);
-
         hr->note_self_forwarding_removal_start(during_initial_mark,
                                                during_conc_mark);
         _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -167,7 +167,6 @@
   _last_update_rs_processed_buffers(_max_gc_threads, "%d"),
   _last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
   _last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
-  _last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"),
   _last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
   _last_termination_times_ms(_max_gc_threads, "%.1lf"),
   _last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
@@ -194,7 +193,6 @@
   _last_update_rs_processed_buffers.reset();
   _last_scan_rs_times_ms.reset();
   _last_strong_code_root_scan_times_ms.reset();
-  _last_strong_code_root_mark_times_ms.reset();
   _last_obj_copy_times_ms.reset();
   _last_termination_times_ms.reset();
   _last_termination_attempts.reset();
@@ -215,7 +213,6 @@
   _last_update_rs_processed_buffers.verify();
   _last_scan_rs_times_ms.verify();
   _last_strong_code_root_scan_times_ms.verify();
-  _last_strong_code_root_mark_times_ms.verify();
   _last_obj_copy_times_ms.verify();
   _last_termination_times_ms.verify();
   _last_termination_attempts.verify();
@@ -230,7 +227,6 @@
                                _last_update_rs_times_ms.get(i) +
                                _last_scan_rs_times_ms.get(i) +
                                _last_strong_code_root_scan_times_ms.get(i) +
-                               _last_strong_code_root_mark_times_ms.get(i) +
                                _last_obj_copy_times_ms.get(i) +
                                _last_termination_times_ms.get(i);
 
@@ -302,9 +298,6 @@
     if (_last_satb_filtering_times_ms.sum() > 0.0) {
       _last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
     }
-    if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
-     _last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)");
-    }
     _last_update_rs_times_ms.print(2, "Update RS (ms)");
       _last_update_rs_processed_buffers.print(3, "Processed Buffers");
     _last_scan_rs_times_ms.print(2, "Scan RS (ms)");
@@ -322,9 +315,6 @@
     if (_last_satb_filtering_times_ms.sum() > 0.0) {
       _last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
     }
-    if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
-      _last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)");
-    }
     _last_update_rs_times_ms.print(1, "Update RS (ms)");
       _last_update_rs_processed_buffers.print(2, "Processed Buffers");
     _last_scan_rs_times_ms.print(1, "Scan RS (ms)");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -120,7 +120,6 @@
   WorkerDataArray<int>    _last_update_rs_processed_buffers;
   WorkerDataArray<double> _last_scan_rs_times_ms;
   WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
-  WorkerDataArray<double> _last_strong_code_root_mark_times_ms;
   WorkerDataArray<double> _last_obj_copy_times_ms;
   WorkerDataArray<double> _last_termination_times_ms;
   WorkerDataArray<size_t> _last_termination_attempts;
@@ -199,10 +198,6 @@
     _last_strong_code_root_scan_times_ms.set(worker_i, ms);
   }
 
-  void record_strong_code_root_mark_time(uint worker_i, double ms) {
-    _last_strong_code_root_mark_times_ms.set(worker_i, ms);
-  }
-
   void record_obj_copy_time(uint worker_i, double ms) {
     _last_obj_copy_times_ms.set(worker_i, ms);
   }
@@ -369,10 +364,6 @@
     return _last_strong_code_root_scan_times_ms.average();
   }
 
-  double average_last_strong_code_root_mark_time(){
-    return _last_strong_code_root_mark_times_ms.average();
-  }
-
   double average_last_obj_copy_time() {
     return _last_obj_copy_times_ms.average();
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -129,13 +129,15 @@
 
   SharedHeap* sh = SharedHeap::heap();
 
-  // Need cleared claim bits for the strong roots processing
+  // Need cleared claim bits for the roots processing
   ClassLoaderDataGraph::clear_claimed_marks();
 
-  sh->process_strong_roots(true,  // activate StrongRootsScope
-                           SharedHeap::SO_SystemClasses,
+  MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
+  sh->process_strong_roots(true,   // activate StrongRootsScope
+                           SharedHeap::SO_None,
                            &GenMarkSweep::follow_root_closure,
-                           &GenMarkSweep::follow_klass_closure);
+                           &GenMarkSweep::follow_cld_closure,
+                           &follow_code_closure);
 
   // Process reference objects found during marking
   ReferenceProcessor* rp = GenMarkSweep::ref_processor();
@@ -304,13 +306,15 @@
 
   SharedHeap* sh = SharedHeap::heap();
 
-  // Need cleared claim bits for the strong roots processing
+  // Need cleared claim bits for the roots processing
   ClassLoaderDataGraph::clear_claimed_marks();
 
-  sh->process_strong_roots(true,  // activate StrongRootsScope
-                           SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
-                           &GenMarkSweep::adjust_pointer_closure,
-                           &GenMarkSweep::adjust_klass_closure);
+  CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
+  sh->process_all_roots(true,  // activate StrongRootsScope
+                        SharedHeap::SO_AllCodeCache,
+                        &GenMarkSweep::adjust_pointer_closure,
+                        &GenMarkSweep::adjust_cld_closure,
+                        &adjust_code_closure);
 
   assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
   g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -25,7 +25,28 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
+#include "gc_implementation/g1/g1ParScanThreadState.hpp"
 
 G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state) :
   G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
   _cm(_g1->concurrent_mark()) {}
+
+G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1) :
+  _g1(g1), _par_scan_state(NULL), _worker_id(UINT_MAX) { }
+
+G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
+  _g1(g1), _par_scan_state(NULL),
+  _worker_id(UINT_MAX) {
+  set_par_scan_thread_state(par_scan_state);
+}
+
+void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan_state) {
+  assert(_par_scan_state == NULL, "_par_scan_state must only be set once");
+  assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL.");
+
+  _par_scan_state = par_scan_state;
+  _worker_id = par_scan_state->queue_num();
+
+  assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u),
+         err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u)));
+}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
 
+#include "memory/iterator.hpp"
+
 class HeapRegion;
 class G1CollectedHeap;
 class G1RemSet;
@@ -51,8 +53,13 @@
   G1ParScanThreadState* _par_scan_state;
   uint _worker_id;
 public:
+  // Initializes the instance, leaving _par_scan_state uninitialized; it must be
+  // set later using the set_par_scan_thread_state() method.
+  G1ParClosureSuper(G1CollectedHeap* g1);
   G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
   bool apply_to_weak_ref_discovered_field() { return true; }
+
+  void set_par_scan_thread_state(G1ParScanThreadState* par_scan_state);
 };
 
 class G1ParPushHeapRSClosure : public G1ParClosureSuper {
@@ -68,9 +75,8 @@
 
 class G1ParScanClosure : public G1ParClosureSuper {
 public:
-  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
-    G1ParClosureSuper(g1, par_scan_state)
-  {
+  G1ParScanClosure(G1CollectedHeap* g1, ReferenceProcessor* rp) :
+    G1ParClosureSuper(g1) {
     assert(_ref_processor == NULL, "sanity");
     _ref_processor = rp;
   }
@@ -102,7 +108,7 @@
   template <class T> void do_klass_barrier(T* p, oop new_obj);
 };
 
-template <G1Barrier barrier, bool do_mark_object>
+template <G1Barrier barrier, G1Mark do_mark_object>
 class G1ParCopyClosure : public G1ParCopyHelper {
 private:
   template <class T> void do_oop_work(T* p);
@@ -117,19 +123,19 @@
   template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+
+  G1CollectedHeap*      g1()  { return _g1; }
+  G1ParScanThreadState* pss() { return _par_scan_state; }
+  ReferenceProcessor*   rp()  { return _ref_processor; }
 };
 
-typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
-
-
-typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
-
+typedef G1ParCopyClosure<G1BarrierNone,  G1MarkNone>             G1ParScanExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierNone,  G1MarkFromRoot>         G1ParScanAndMarkExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierNone,  G1MarkPromotedFromRoot> G1ParScanAndMarkWeakExtRootClosure;
 // We use a separate closure to handle references during evacuation
 // failure processing.
 
-typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
+typedef G1ParCopyClosure<G1BarrierEvac, G1MarkNone> G1ParScanHeapEvacFailureClosure;
 
 class FilterIntoCSClosure: public ExtendedOopClosure {
   G1CollectedHeap* _g1;
@@ -160,10 +166,11 @@
 };
 
 // Closure for iterating over object fields during concurrent marking
-class G1CMOopClosure : public ExtendedOopClosure {
+class G1CMOopClosure : public MetadataAwareOopClosure {
+protected:
+  ConcurrentMark*    _cm;
 private:
   G1CollectedHeap*   _g1h;
-  ConcurrentMark*    _cm;
   CMTask*            _task;
 public:
   G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
@@ -173,7 +180,7 @@
 };
 
 // Closure to scan the root regions during concurrent marking
-class G1RootRegionScanClosure : public ExtendedOopClosure {
+class G1RootRegionScanClosure : public MetadataAwareOopClosure {
 private:
   G1CollectedHeap* _g1h;
   ConcurrentMark*  _cm;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -28,9 +28,11 @@
 #include "gc_implementation/g1/concurrentMark.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1OopClosures.hpp"
+#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
+#include "memory/iterator.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 
 /*
@@ -107,10 +109,6 @@
 
 template <class T>
 inline void G1CMOopClosure::do_oop_nv(T* p) {
-  assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
-  assert(!_g1h->is_on_master_free_list(
-                    _g1h->heap_region_containing((HeapWord*) p)), "invariant");
-
   oop obj = oopDesc::load_decode_heap_oop(p);
   if (_cm->verbose_high()) {
     gclog_or_tty->print_cr("[%u] we're looking at location "
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1OopClosures.inline.hpp"
+#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/oop.pcgc.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
+
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
+  : _g1h(g1h),
+    _refs(g1h->task_queue(queue_num)),
+    _dcq(&g1h->dirty_card_queue_set()),
+    _ct_bs(g1h->g1_barrier_set()),
+    _g1_rem(g1h->g1_rem_set()),
+    _hash_seed(17), _queue_num(queue_num),
+    _term_attempts(0),
+    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
+    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
+    _age_table(false), _scanner(g1h, rp),
+    _strong_roots_time(0), _term_time(0),
+    _alloc_buffer_waste(0), _undo_waste(0) {
+  _scanner.set_par_scan_thread_state(this);
+  // we allocate G1YoungSurvRateNumRegions plus one entries, since
+  // we "sacrifice" entry 0 to keep track of surviving bytes for
+  // non-young regions (where the age is -1)
+  // We also add a few elements at the beginning and at the end in
+  // an attempt to eliminate cache contention
+  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
+  uint array_length = PADDING_ELEM_NUM +
+                      real_length +
+                      PADDING_ELEM_NUM;
+  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
+  if (_surviving_young_words_base == NULL)
+    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
+                          "Not enough space for young surv histo.");
+  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
+  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
+
+  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
+  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
+
+  _start = os::elapsedTime();
+}
+
+G1ParScanThreadState::~G1ParScanThreadState() {
+  retire_alloc_buffers();
+  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
+}
+
+void
+G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
+{
+  st->print_raw_cr("GC Termination Stats");
+  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
+                   " ------waste (KiB)------");
+  st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
+                   "  total   alloc    undo");
+  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
+                   " ------- ------- -------");
+}
+
+void
+G1ParScanThreadState::print_termination_stats(int i,
+                                              outputStream* const st) const
+{
+  const double elapsed_ms = elapsed_time() * 1000.0;
+  const double s_roots_ms = strong_roots_time() * 1000.0;
+  const double term_ms    = term_time() * 1000.0;
+  st->print_cr("%3d %9.2f %9.2f %6.2f "
+               "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
+               SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
+               i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
+               term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
+               (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
+               alloc_buffer_waste() * HeapWordSize / K,
+               undo_waste() * HeapWordSize / K);
+}
+
+#ifdef ASSERT
+bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
+  assert(ref != NULL, "invariant");
+  assert(UseCompressedOops, "sanity");
+  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, p2i(ref)));
+  oop p = oopDesc::load_decode_heap_oop(ref);
+  assert(_g1h->is_in_g1_reserved(p),
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
+  return true;
+}
+
+bool G1ParScanThreadState::verify_ref(oop* ref) const {
+  assert(ref != NULL, "invariant");
+  if (has_partial_array_mask(ref)) {
+    // Must be in the collection set--it's already been copied.
+    oop p = clear_partial_array_mask(ref);
+    assert(_g1h->obj_in_cs(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
+  } else {
+    oop p = oopDesc::load_decode_heap_oop(ref);
+    assert(_g1h->is_in_g1_reserved(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
+  }
+  return true;
+}
+
+bool G1ParScanThreadState::verify_task(StarTask ref) const {
+  if (ref.is_narrow()) {
+    return verify_ref((narrowOop*) ref);
+  } else {
+    return verify_ref((oop*) ref);
+  }
+}
+#endif // ASSERT
+
+void G1ParScanThreadState::trim_queue() {
+  assert(_evac_failure_cl != NULL, "not set");
+
+  StarTask ref;
+  do {
+    // Drain the overflow stack first, so other threads can steal.
+    while (_refs->pop_overflow(ref)) {
+      dispatch_reference(ref);
+    }
+
+    while (_refs->pop_local(ref)) {
+      dispatch_reference(ref);
+    }
+  } while (!_refs->is_empty());
+}
+
+oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
+  size_t word_sz = old->size();
+  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
+  // +1 to make the -1 indexes valid...
+  int       young_index = from_region->young_index_in_cset()+1;
+  assert( (from_region->is_young() && young_index >  0) ||
+         (!from_region->is_young() && young_index == 0), "invariant" );
+  G1CollectorPolicy* g1p = _g1h->g1_policy();
+  markOop m = old->mark();
+  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
+                                           : m->age();
+  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
+                                                             word_sz);
+  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
+#ifndef PRODUCT
+  // Should this evacuation fail?
+  if (_g1h->evacuation_should_fail()) {
+    if (obj_ptr != NULL) {
+      undo_allocation(alloc_purpose, obj_ptr, word_sz);
+      obj_ptr = NULL;
+    }
+  }
+#endif // !PRODUCT
+
+  if (obj_ptr == NULL) {
+    // This will either forward-to-self, or detect that someone else has
+    // installed a forwarding pointer.
+    return _g1h->handle_evacuation_failure_par(this, old);
+  }
+
+  oop obj = oop(obj_ptr);
+
+  // We're going to allocate linearly, so might as well prefetch ahead.
+  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
+
+  oop forward_ptr = old->forward_to_atomic(obj);
+  if (forward_ptr == NULL) {
+    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
+
+    // alloc_purpose is just a hint to allocate() above, recheck the type of region
+    // we actually allocated from and update alloc_purpose accordingly
+    HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
+    alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
+
+    if (g1p->track_object_age(alloc_purpose)) {
+      // We could simply do obj->incr_age(). However, this causes a
+      // performance issue. obj->incr_age() will first check whether
+      // the object has a displaced mark by checking its mark word;
+      // getting the mark word from the new location of the object
+      // stalls. So, given that we already have the mark word and we
+      // are about to install it anyway, it's better to increase the
+      // age on the mark word, when the object does not have a
+      // displaced mark word. We're not expecting many objects to have
+      // a displaced mark word, so that case is not optimized
+      // further (it could be...) and we simply call obj->incr_age().
+
+      if (m->has_displaced_mark_helper()) {
+        // in this case, we have to install the mark word first,
+        // otherwise obj looks to be forwarded (the old mark word,
+        // which contains the forward pointer, was copied)
+        obj->set_mark(m);
+        obj->incr_age();
+      } else {
+        m = m->incr_age();
+        obj->set_mark(m);
+      }
+      age_table()->add(obj, word_sz);
+    } else {
+      obj->set_mark(m);
+    }
+
+    if (G1StringDedup::is_enabled()) {
+      G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
+                                             to_region->is_young(),
+                                             queue_num(),
+                                             obj);
+    }
+
+    size_t* surv_young_words = surviving_young_words();
+    surv_young_words[young_index] += word_sz;
+
+    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
+      // We keep track of the next start index in the length field of
+      // the to-space object. The actual length can be found in the
+      // length field of the from-space object.
+      arrayOop(obj)->set_length(0);
+      oop* old_p = set_partial_array_mask(old);
+      push_on_queue(old_p);
+    } else {
+      // No point in using the slower heap_region_containing() method,
+      // given that we know obj is in the heap.
+      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
+      obj->oop_iterate_backwards(&_scanner);
+    }
+  } else {
+    undo_allocation(alloc_purpose, obj_ptr, word_sz);
+    obj = forward_ptr;
+  }
+  return obj;
+}
+
+HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
+  HeapWord* obj = NULL;
+  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
+    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
+    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+    alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+
+    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
+    if (buf == NULL) {
+      return NULL; // Let caller handle allocation failure.
+    }
+    // Otherwise.
+    alloc_buf->set_word_size(gclab_word_size);
+    alloc_buf->set_buf(buf);
+
+    obj = alloc_buf->allocate(word_sz);
+    assert(obj != NULL, "buffer was definitely big enough...");
+  } else {
+    obj = _g1h->par_allocate_during_gc(purpose, word_sz);
+  }
+  return obj;
+}
+
+void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
+  if (alloc_buffer(purpose)->contains(obj)) {
+    assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
+           "should contain whole object");
+    alloc_buffer(purpose)->undo_allocation(obj, word_sz);
+  } else {
+    CollectedHeap::fill_with_object(obj, word_sz);
+    add_to_undo_waste(word_sz);
+  }
+}
+
+HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
+  HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
+  if (obj != NULL) {
+    return obj;
+  }
+  return allocate_slow(purpose, word_sz);
+}
+
+void G1ParScanThreadState::retire_alloc_buffers() {
+  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    size_t waste = _alloc_buffers[ap]->words_remaining();
+    add_to_alloc_buffer_waste(waste);
+    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
+                                               true /* end_of_gc */,
+                                               false /* retain */);
+  }
+}
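
The new allocate()/allocate_slow() pair above follows the familiar PLAB policy: bump-pointer allocate inside the current per-thread buffer, and on a miss retire and refill the buffer only when the request is small relative to the buffer size (bounding the words wasted at the buffer's tail); larger requests bypass the buffer entirely. Below is a minimal standalone sketch of that waste-bounded refill policy in plain C++; the Plab type, the constants and the waste accounting are illustrative stand-ins, not HotSpot code.

#include <cstddef>
#include <cstdio>
#include <new>
#include <vector>

// A simplified per-thread allocation buffer, loosely modelled on a PLAB.
struct Plab {
  std::vector<char> storage;
  std::size_t pos = 0;
  std::size_t remaining() const { return storage.size() - pos; }
  void* allocate(std::size_t n) {            // fast path: bump a cursor
    if (remaining() < n) return nullptr;
    void* p = storage.data() + pos;
    pos += n;
    return p;
  }
  void refill(std::size_t sz) { storage.assign(sz, 0); pos = 0; }
};

// Illustrative policy constants; HotSpot derives these from runtime flags.
const std::size_t kPlabSize = 4096;
const std::size_t kWastePct = 10;            // refill only if request < 10% of a buffer

std::size_t g_waste = 0;                     // bytes discarded when a buffer is retired

void* allocate_slow(Plab& plab, std::size_t n) {
  if (n * 100 < kPlabSize * kWastePct) {
    g_waste += plab.remaining();             // what is left in the old buffer is wasted
    plab.refill(kPlabSize);                  // retire it and start a fresh buffer
    return plab.allocate(n);
  }
  return ::operator new(n);                  // large request: bypass the buffer entirely
}

void* allocate(Plab& plab, std::size_t n) {
  if (void* p = plab.allocate(n)) return p;  // common case: in-buffer bump allocation
  return allocate_slow(plab, n);             // otherwise apply the waste-bounded policy
}

int main() {
  Plab plab;
  plab.refill(kPlabSize);
  for (int i = 0; i < 1000; i++) allocate(plab, 100);
  std::printf("wasted bytes across retired buffers: %zu\n", g_waste);
  return 0;
}
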
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
+
+#include "gc_implementation/g1/dirtyCardQueue.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1OopClosures.hpp"
+#include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/shared/ageTable.hpp"
+#include "memory/allocation.hpp"
+#include "oops/oop.hpp"
+
+class HeapRegion;
+class outputStream;
+
+class G1ParScanThreadState : public StackObj {
+ private:
+  G1CollectedHeap* _g1h;
+  RefToScanQueue*  _refs;
+  DirtyCardQueue   _dcq;
+  G1SATBCardTableModRefBS* _ct_bs;
+  G1RemSet* _g1_rem;
+
+  G1ParGCAllocBuffer  _surviving_alloc_buffer;
+  G1ParGCAllocBuffer  _tenured_alloc_buffer;
+  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+  ageTable            _age_table;
+
+  G1ParScanClosure    _scanner;
+
+  size_t           _alloc_buffer_waste;
+  size_t           _undo_waste;
+
+  OopsInHeapRegionClosure*      _evac_failure_cl;
+
+  int  _hash_seed;
+  uint _queue_num;
+
+  size_t _term_attempts;
+
+  double _start;
+  double _start_strong_roots;
+  double _strong_roots_time;
+  double _start_term;
+  double _term_time;
+
+  // Map from young-age-index (0 == not young, 1 is youngest) to
+  // surviving words. base is what we get back from the malloc call
+  size_t* _surviving_young_words_base;
+  // this points into the array, as we use the first few entries for padding
+  size_t* _surviving_young_words;
+
+#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
+
+  void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
+
+  void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
+
+  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
+  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
+
+  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
+
+  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
+    // If the new value of the field points to the same region or
+    // is the to-space, we don't need to include it in the Rset updates.
+    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
+      size_t card_index = ctbs()->index_for(p);
+      // If the card hasn't been added to the buffer, do it.
+      if (ctbs()->mark_card_deferred(card_index)) {
+        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
+      }
+    }
+  }
+
+ public:
+  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
+  ~G1ParScanThreadState();
+
+  ageTable*         age_table()       { return &_age_table;       }
+
+  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
+    return _alloc_buffers[purpose];
+  }
+
+  size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
+  size_t undo_waste() const                      { return _undo_waste; }
+
+#ifdef ASSERT
+  bool queue_is_empty() const { return _refs->is_empty(); }
+
+  bool verify_ref(narrowOop* ref) const;
+  bool verify_ref(oop* ref) const;
+  bool verify_task(StarTask ref) const;
+#endif // ASSERT
+
+  template <class T> void push_on_queue(T* ref) {
+    assert(verify_ref(ref), "sanity");
+    _refs->push(ref);
+  }
+
+  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
+
+ private:
+
+  inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
+  inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
+  inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
+
+ public:
+
+  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
+    _evac_failure_cl = evac_failure_cl;
+  }
+
+  OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }
+
+  int* hash_seed() { return &_hash_seed; }
+  uint queue_num() { return _queue_num; }
+
+  size_t term_attempts() const  { return _term_attempts; }
+  void note_term_attempt() { _term_attempts++; }
+
+  void start_strong_roots() {
+    _start_strong_roots = os::elapsedTime();
+  }
+  void end_strong_roots() {
+    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
+  }
+  double strong_roots_time() const { return _strong_roots_time; }
+
+  void start_term_time() {
+    note_term_attempt();
+    _start_term = os::elapsedTime();
+  }
+  void end_term_time() {
+    _term_time += (os::elapsedTime() - _start_term);
+  }
+  double term_time() const { return _term_time; }
+
+  double elapsed_time() const {
+    return os::elapsedTime() - _start;
+  }
+
+  static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
+  void print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
+
+  size_t* surviving_young_words() {
+    // We add on to hide entry 0 which accumulates surviving words for
+    // age -1 regions (i.e. non-young ones)
+    return _surviving_young_words;
+  }
+
+ private:
+  void retire_alloc_buffers();
+
+  #define G1_PARTIAL_ARRAY_MASK 0x2
+
+  inline bool has_partial_array_mask(oop* ref) const {
+    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
+  }
+
+  // We never encode partial array oops as narrowOop*, so return false immediately.
+  // This allows the compiler to create optimized code when popping references from
+  // the work queue.
+  inline bool has_partial_array_mask(narrowOop* ref) const {
+    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
+    return false;
+  }
+
+  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
+  // We always encode partial arrays as regular oop, to allow the
+  // specialization for has_partial_array_mask() for narrowOops above.
+  // This means that unintentional use of this method with narrowOops is caught
+  // by the compiler.
+  inline oop* set_partial_array_mask(oop obj) const {
+    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
+    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
+  }
+
+  inline oop clear_partial_array_mask(oop* ref) const {
+    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
+  }
+
+  inline void do_oop_partial_array(oop* p);
+
+  // This method is applied to the fields of the objects that have just been copied.
+  template <class T> inline void do_oop_evac(T* p, HeapRegion* from);
+
+  template <class T> inline void deal_with_reference(T* ref_to_scan);
+
+  inline void dispatch_reference(StarTask ref);
+ public:
+
+  oop copy_to_survivor_space(oop const obj);
+
+  void trim_queue();
+
+  inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
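
The has/set/clear_partial_array_mask() helpers in the header above let a single task queue carry both plain references and "continue scanning this object array" work items, by setting a mask bit in the queued oop*. A small self-contained illustration of the same low-bit pointer-tagging trick follows; the mask value and function names only loosely mirror the header.

#include <cassert>
#include <cstdint>
#include <cstdio>

const uintptr_t kPartialArrayMask = 0x2;     // low bit used as a tag (illustrative)

// Tag a pointer so consumers know it is an "array continuation" task.
void* set_partial_array_mask(void* obj) {
  assert(((uintptr_t)obj & kPartialArrayMask) == 0 && "low bits must be free");
  return (void*)((uintptr_t)obj | kPartialArrayMask);
}

bool has_partial_array_mask(void* ref) {
  return ((uintptr_t)ref & kPartialArrayMask) != 0;
}

// Strip the tag to recover the original object pointer.
void* clear_partial_array_mask(void* ref) {
  return (void*)((uintptr_t)ref & ~kPartialArrayMask);
}

int main() {
  alignas(8) int obj = 42;                   // aligned, so the low bits are zero
  void* task = set_partial_array_mask(&obj);
  if (has_partial_array_mask(task)) {
    int* p = (int*)clear_partial_array_mask(task);
    std::printf("recovered value: %d\n", *p);
  }
  return 0;
}
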
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
+
+#include "gc_implementation/g1/g1ParScanThreadState.hpp"
+#include "gc_implementation/g1/g1RemSet.inline.hpp"
+#include "oops/oop.inline.hpp"
+
+template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
+  if (!from->is_survivor()) {
+    _g1_rem->par_write_ref(from, p, tid);
+  }
+}
+
+template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
+  if (G1DeferredRSUpdate) {
+    deferred_rs_update(from, p, tid);
+  } else {
+    immediate_rs_update(from, p, tid);
+  }
+}
+
+template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
+  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
+         "Reference should not be NULL here as such are never pushed to the task queue.");
+  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+
+  // Although we never intentionally push references outside of the collection
+  // set, due to (benign) races in the claim mechanism during RSet scanning more
+  // than one thread might claim the same card. So the same card may be
+  // processed multiple times. So redo this check.
+  if (_g1h->in_cset_fast_test(obj)) {
+    oop forwardee;
+    if (obj->is_forwarded()) {
+      forwardee = obj->forwardee();
+    } else {
+      forwardee = copy_to_survivor_space(obj);
+    }
+    assert(forwardee != NULL, "forwardee should not be NULL");
+    oopDesc::encode_store_heap_oop(p, forwardee);
+  }
+
+  assert(obj != NULL, "Must be");
+  update_rs(from, p, queue_num());
+}
+
+inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
+  assert(has_partial_array_mask(p), "invariant");
+  oop from_obj = clear_partial_array_mask(p);
+
+  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
+  assert(from_obj->is_objArray(), "must be obj array");
+  objArrayOop from_obj_array = objArrayOop(from_obj);
+  // The from-space object contains the real length.
+  int length                 = from_obj_array->length();
+
+  assert(from_obj->is_forwarded(), "must be forwarded");
+  oop to_obj                 = from_obj->forwardee();
+  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
+  objArrayOop to_obj_array   = objArrayOop(to_obj);
+  // We keep track of the next start index in the length field of the
+  // to-space object.
+  int next_index             = to_obj_array->length();
+  assert(0 <= next_index && next_index < length,
+         err_msg("invariant, next index: %d, length: %d", next_index, length));
+
+  int start                  = next_index;
+  int end                    = length;
+  int remainder              = end - start;
+  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
+  if (remainder > 2 * ParGCArrayScanChunk) {
+    end = start + ParGCArrayScanChunk;
+    to_obj_array->set_length(end);
+    // Push the remainder before we process the range in case another
+    // worker has run out of things to do and can steal it.
+    oop* from_obj_p = set_partial_array_mask(from_obj);
+    push_on_queue(from_obj_p);
+  } else {
+    assert(length == end, "sanity");
+    // We'll process the final range for this object. Restore the length
+    // so that the heap remains parsable in case of evacuation failure.
+    to_obj_array->set_length(end);
+  }
+  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
+  // Process indexes [start,end). It will also process the header
+  // along with the first chunk (i.e., the chunk with start == 0).
+  // Note that at this point the length field of to_obj_array is not
+  // correct given that we are using it to keep track of the next
+  // start index. oop_iterate_range() (thankfully!) ignores the length
+  // field and only relies on the start / end parameters.  It does
+  // however return the size of the object which will be incorrect. So
+  // we have to ignore it even if we wanted to use it.
+  to_obj_array->oop_iterate_range(&_scanner, start, end);
+}
+
+template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
+  if (!has_partial_array_mask(ref_to_scan)) {
+    // Note: we can use "raw" versions of "region_containing" because
+    // "obj_to_scan" is definitely in the heap, and is not in a
+  // "ref_to_scan" is definitely in the heap, and is not in a
+    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
+    do_oop_evac(ref_to_scan, r);
+  } else {
+    do_oop_partial_array((oop*)ref_to_scan);
+  }
+}
+
+inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
+  assert(verify_task(ref), "sanity");
+  if (ref.is_narrow()) {
+    deal_with_reference((narrowOop*)ref);
+  } else {
+    deal_with_reference((oop*)ref);
+  }
+}
+
+void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
+  StarTask stolen_task;
+  while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
+    assert(verify_task(stolen_task), "sanity");
+    dispatch_reference(stolen_task);
+
+    // We've just processed a reference and we might have made
+    // available new entries on the queues. So we have to make sure
+    // we drain the queues as necessary.
+    trim_queue();
+  }
+}
+
+#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP */
+
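
do_oop_partial_array() above scans a large object array one ParGCArrayScanChunk-sized slice at a time, records the next start index in the to-space copy's length field, and pushes the remainder back on the queue before scanning the current slice so an idle worker can steal it. The sketch below is a simplified, single-threaded model of that chunking loop; the Task shape and the chunk size are invented for the example.

#include <cstdio>
#include <deque>
#include <vector>

const int kChunk = 4;                        // illustrative chunk size

struct Task { const std::vector<int>* arr; int start; };

void process(std::deque<Task>& queue, const Task& t) {
  int end = (int)t.arr->size();
  if (end - t.start > 2 * kChunk) {
    end = t.start + kChunk;
    // Push the remainder first so that, in the real parallel version, another
    // worker could steal it while we scan the current slice.
    queue.push_back({t.arr, end});
  }
  for (int i = t.start; i < end; i++)
    std::printf("scan element %d -> %d\n", i, (*t.arr)[i]);
}

int main() {
  std::vector<int> big(11, 7);
  std::deque<Task> queue;
  queue.push_back({&big, 0});
  while (!queue.empty()) {
    Task t = queue.front();
    queue.pop_front();
    process(queue, t);
  }
  return 0;
}
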
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
 
 #include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "oops/oop.inline.hpp"
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -66,6 +66,17 @@
   }
 }
 
+void G1SATBCardTableModRefBS::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
+  if (!dest_uninitialized) {
+    write_ref_array_pre_work(dst, count);
+  }
+}
+void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
+  if (!dest_uninitialized) {
+    write_ref_array_pre_work(dst, count);
+  }
+}
+
 bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
   jbyte val = _byte_map[card_index];
   // It's already processed
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -86,16 +86,8 @@
   }
 
   template <class T> void write_ref_array_pre_work(T* dst, int count);
-  virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
-    if (!dest_uninitialized) {
-      write_ref_array_pre_work(dst, count);
-    }
-  }
-  virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
-    if (!dest_uninitialized) {
-      write_ref_array_pre_work(dst, count);
-    }
-  }
+  virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
+  virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
 
 /*
    Claimed and deferred bits are used together in G1 during the evacuation
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -30,14 +30,21 @@
 // non-virtually, using a mechanism defined in this file.  Extend these
 // macros in the obvious way to add specializations for new closures.
 
-// Forward declarations.
 enum G1Barrier {
   G1BarrierNone,
   G1BarrierEvac,
   G1BarrierKlass
 };
 
-template<G1Barrier barrier, bool do_mark_object>
+enum G1Mark {
+  G1MarkNone,
+  G1MarkFromRoot,
+  G1MarkPromotedFromRoot
+};
+
+// Forward declarations.
+
+template<G1Barrier barrier, G1Mark do_mark_object>
 class G1ParCopyClosure;
 
 class G1ParScanClosure;
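
Replacing the bool do_mark_object template parameter with the three-valued G1Mark enum lets each G1ParCopyClosure instantiation bake its marking behaviour in at compile time instead of branching on a runtime flag. Here is a tiny standalone illustration of dispatching on an enum non-type template parameter; the closure body is made up and only mirrors the shape of the typedefs introduced above.

#include <cstdio>

enum G1Mark { G1MarkNone, G1MarkFromRoot, G1MarkPromotedFromRoot };

// The marking policy is a compile-time constant, so the branch below is
// resolved by the compiler and each instantiation contains only its own path.
template <G1Mark do_mark_object>
struct CopyClosure {
  void do_oop(int* p) {
    if (do_mark_object != G1MarkNone) {
      std::printf("copy and mark %d (policy %d)\n", *p, (int)do_mark_object);
    } else {
      std::printf("copy %d without marking\n", *p);
    }
  }
};

// Aliases in the spirit of the typedefs introduced by this patch.
typedef CopyClosure<G1MarkNone>     ScanRootClosure;
typedef CopyClosure<G1MarkFromRoot> ScanAndMarkRootClosure;

int main() {
  int v = 42;
  ScanRootClosure scan;
  scan.do_oop(&v);
  ScanAndMarkRootClosure mark;
  mark.do_oop(&v);
  return 0;
}
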
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -30,6 +30,7 @@
 #include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/shared/liveRange.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/iterator.hpp"
 #include "memory/space.inline.hpp"
@@ -61,7 +62,7 @@
                                HeapRegion* hr,
                                HeapWord* cur, HeapWord* top) {
   oop cur_oop = oop(cur);
-  int oop_size = cur_oop->size();
+  size_t oop_size = hr->block_size(cur);
   HeapWord* next_obj = cur + oop_size;
   while (next_obj < top) {
     // Keep filtering the remembered set.
@@ -72,7 +73,7 @@
     }
     cur = next_obj;
     cur_oop = oop(cur);
-    oop_size = cur_oop->size();
+    oop_size = hr->block_size(cur);
     next_obj = cur + oop_size;
   }
   return cur;
@@ -82,7 +83,7 @@
                                       HeapWord* bottom,
                                       HeapWord* top) {
   G1CollectedHeap* g1h = _g1;
-  int oop_size;
+  size_t oop_size;
   ExtendedOopClosure* cl2 = NULL;
 
   FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
@@ -102,7 +103,7 @@
   if (!g1h->is_obj_dead(oop(bottom), _hr)) {
     oop_size = oop(bottom)->oop_iterate(cl2, mr);
   } else {
-    oop_size = oop(bottom)->size();
+    oop_size = _hr->block_size(bottom);
   }
 
   bottom += oop_size;
@@ -374,7 +375,7 @@
   // region.
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
-  set_saved_mark();
+  record_top_and_timestamp();
 
   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 }
@@ -394,38 +395,11 @@
   return NULL;
 }
 
-void HeapRegion::save_marks() {
-  set_saved_mark();
-}
-
-void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-  HeapWord* p = mr.start();
-  HeapWord* e = mr.end();
-  oop obj;
-  while (p < e) {
-    obj = oop(p);
-    p += obj->oop_iterate(cl);
-  }
-  assert(p == e, "bad memregion: doesn't end on obj boundary");
-}
-
-#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
-void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
-  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl);              \
-}
-SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
-
-
-void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
-  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
-}
-
 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                     bool during_conc_mark) {
   // We always recreate the prev marking info and we'll explicitly
   // mark all objects we find to be self-forwarded on the prev
   // bitmap. So all objects need to be below PTAMS.
-  _prev_top_at_mark_start = top();
   _prev_marked_bytes = 0;
 
   if (during_initial_mark) {
@@ -449,6 +423,7 @@
   assert(0 <= marked_bytes && marked_bytes <= used(),
          err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                  marked_bytes, used()));
+  _prev_top_at_mark_start = top();
   _prev_marked_bytes = marked_bytes;
 }
 
@@ -476,7 +451,7 @@
     } else if (!g1h->is_obj_dead(obj)) {
       cl->do_object(obj);
     }
-    cur += obj->size();
+    cur += block_size(cur);
   }
   return NULL;
 }
@@ -548,7 +523,7 @@
       return cur;
     }
     // Otherwise...
-    next = (cur + obj->size());
+    next = cur + block_size(cur);
   }
 
   // If we finish the above loop...We have a parseable object that
@@ -556,10 +531,9 @@
   // inside or spans the entire region.
 
   assert(obj == oop(cur), "sanity");
-  assert(cur <= start &&
-         obj->klass_or_null() != NULL &&
-         (cur + obj->size()) > start,
-         "Loop postcondition");
+  assert(cur <= start, "Loop postcondition");
+  assert(obj->klass_or_null() != NULL, "Loop postcondition");
+  assert((cur + block_size(cur)) > start, "Loop postcondition");
 
   if (!g1h->is_obj_dead(obj)) {
     obj->oop_iterate(cl, mr);
@@ -573,7 +547,7 @@
     };
 
     // Otherwise:
-    next = (cur + obj->size());
+    next = cur + block_size(cur);
 
     if (!g1h->is_obj_dead(obj)) {
       if (next < end || !obj->is_objArray()) {
@@ -928,10 +902,11 @@
   size_t object_num = 0;
   while (p < top()) {
     oop obj = oop(p);
-    size_t obj_size = obj->size();
+    size_t obj_size = block_size(p);
     object_num += 1;
 
-    if (is_humongous != g1->isHumongous(obj_size)) {
+    if (is_humongous != g1->isHumongous(obj_size) &&
+        !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
       gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                              SIZE_FORMAT" words) in a %shumongous region",
                              p, g1->isHumongous(obj_size) ? "" : "non-",
@@ -942,7 +917,9 @@
 
     // If it returns false, verify_for_object() will output the
     // appropriate message.
-    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
+    if (do_bot_verify &&
+        !g1->is_obj_dead(obj, this) &&
+        !_offsets.verify_for_object(p, obj_size)) {
       *failures = true;
       return;
     }
@@ -950,7 +927,10 @@
     if (!g1->is_obj_dead_cond(obj, this, vo)) {
       if (obj->is_oop()) {
         Klass* klass = obj->klass();
-        if (!klass->is_metaspace_object()) {
+        bool is_metaspace_object = Metaspace::contains(klass) ||
+                                   (vo == VerifyOption_G1UsePrevMarking &&
+                                   ClassLoaderDataGraph::unload_list_contains(klass));
+        if (!is_metaspace_object) {
           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                  "not metadata", klass, (void *)obj);
           *failures = true;
@@ -1064,7 +1044,9 @@
 // away eventually.
 
 void G1OffsetTableContigSpace::clear(bool mangle_space) {
-  ContiguousSpace::clear(mangle_space);
+  set_top(bottom());
+  set_saved_mark_word(bottom());
+  CompactibleSpace::clear(mangle_space);
   _offsets.zero_bottom_entry();
   _offsets.initialize_threshold();
 }
@@ -1102,10 +1084,10 @@
   if (_gc_time_stamp < g1h->get_gc_time_stamp())
     return top();
   else
-    return ContiguousSpace::saved_mark_word();
+    return Space::saved_mark_word();
 }
 
-void G1OffsetTableContigSpace::set_saved_mark() {
+void G1OffsetTableContigSpace::record_top_and_timestamp() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
 
@@ -1117,7 +1099,7 @@
     // of region. If it does so after _gc_time_stamp = ..., then it
     // will pick up the right saved_mark_word() as the high water mark
     // of the region. Either way, the behavior will be correct.
-    ContiguousSpace::set_saved_mark();
+    Space::set_saved_mark_word(top());
     OrderAccess::storestore();
     _gc_time_stamp = curr_gc_time_stamp;
     // No need to do another barrier to flush the writes above. If
@@ -1128,6 +1110,26 @@
   }
 }
 
+void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
+  object_iterate(blk);
+}
+
+void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
+  HeapWord* p = bottom();
+  while (p < top()) {
+    if (block_is_obj(p)) {
+      blk->do_object(oop(p));
+    }
+    p += block_size(p);
+  }
+}
+
+#define block_is_always_obj(q) true
+void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
+  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
+}
+#undef block_is_always_obj
+
 G1OffsetTableContigSpace::
 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                          MemRegion mr) :
@@ -1137,7 +1139,8 @@
 {
   _offsets.set_space(this);
   // false ==> we'll do the clearing if there's clearing to be done.
-  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
+  CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
+  _top = bottom();
   _offsets.zero_bottom_entry();
   _offsets.initialize_threshold();
 }
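
record_top_and_timestamp() above publishes the region's top as its saved mark and only then advances the region's GC time stamp, with a store-store barrier between the two writes; a reader that still sees a stale time stamp simply falls back to top(). The sketch below is a rough standalone model of that publish-then-stamp ordering using std::atomic, assuming a single writer per region; the field names follow the sketch rather than HotSpot.

#include <atomic>
#include <cstdio>

struct Region {
  unsigned top = 0;                          // current allocation top (word index)
  unsigned saved_mark = 0;                   // top recorded at the last stamp
  std::atomic<unsigned> gc_time_stamp{0};    // region's view of the global stamp

  // Writer: record top, then publish the new time stamp (release ~ storestore).
  void record_top_and_timestamp(unsigned global_stamp) {
    if (gc_time_stamp.load(std::memory_order_relaxed) < global_stamp) {
      saved_mark = top;
      gc_time_stamp.store(global_stamp, std::memory_order_release);
    }
  }

  // Reader: a stale stamp means no mark has been recorded for this pause yet,
  // so the high-water mark is simply top().
  unsigned saved_mark_word(unsigned global_stamp) const {
    if (gc_time_stamp.load(std::memory_order_acquire) < global_stamp) return top;
    return saved_mark;
  }
};

int main() {
  Region r;
  r.top = 128;
  std::printf("before: %u\n", r.saved_mark_word(1));  // stale stamp -> top()
  r.record_top_and_timestamp(1);
  r.top = 256;                                        // later allocations
  std::printf("after:  %u\n", r.saved_mark_word(1));  // recorded mark (128)
  return 0;
}
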
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -46,8 +46,6 @@
 // The solution is to remove this method from the definition
 // of a Space.
 
-class CompactibleSpace;
-class ContiguousSpace;
 class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
 class HeapRegion;
@@ -125,9 +123,9 @@
 // the regions anyway) and at the end of a Full GC. The current scheme
 // that uses sequential unsigned ints will fail only if we have 4b
 // evacuation pauses between two cleanups, which is _highly_ unlikely.
-
-class G1OffsetTableContigSpace: public ContiguousSpace {
+class G1OffsetTableContigSpace: public CompactibleSpace {
   friend class VMStructs;
+  HeapWord* _top;
  protected:
   G1BlockOffsetArrayContigSpace _offsets;
   Mutex _par_alloc_lock;
@@ -144,11 +142,32 @@
   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                            MemRegion mr);
 
+  void set_top(HeapWord* value) { _top = value; }
+  HeapWord* top() const { return _top; }
+
+ protected:
+  HeapWord** top_addr() { return &_top; }
+  // Allocation helpers (return NULL if full).
+  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
+  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
+
+ public:
+  void reset_after_compaction() { set_top(compaction_top()); }
+
+  size_t used() const { return byte_size(bottom(), top()); }
+  size_t free() const { return byte_size(top(), end()); }
+  bool is_free_block(const HeapWord* p) const { return p >= top(); }
+
+  MemRegion used_region() const { return MemRegion(bottom(), top()); }
+
+  void object_iterate(ObjectClosure* blk);
+  void safe_object_iterate(ObjectClosure* blk);
+
   void set_bottom(HeapWord* value);
   void set_end(HeapWord* value);
 
   virtual HeapWord* saved_mark_word() const;
-  virtual void set_saved_mark();
+  void record_top_and_timestamp();
   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
 
@@ -168,6 +187,8 @@
   HeapWord* block_start(const void* p);
   HeapWord* block_start_const(const void* p) const;
 
+  void prepare_for_compaction(CompactPoint* cp);
+
   // Add offset table update.
   virtual HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);
@@ -202,10 +223,6 @@
     ContinuesHumongous
   };
 
-  // Requires that the region "mr" be dense with objects, and begin and end
-  // with an object.
-  void oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl);
-
   // The remembered set for this region.
   // (Might want to make this "inline" later, to avoid some alloc failure
   // issues.)
@@ -230,11 +247,9 @@
   bool _evacuation_failed;
 
   // A heap region may be a member one of a number of special subsets, each
-  // represented as linked lists through the field below.  Currently, these
-  // sets include:
+  // represented as linked lists through the field below.  Currently, there
+  // is only one set:
   //   The collection set.
-  //   The set of allocation regions used in a collection pause.
-  //   Spaces that may contain gray objects.
   HeapRegion* _next_in_special_set;
 
   // next region in the young "generation" region set
@@ -353,14 +368,15 @@
     ParMarkRootClaimValue      = 9
   };
 
-  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
-    assert(is_young(), "we can only skip BOT updates on young regions");
-    return ContiguousSpace::par_allocate(word_size);
-  }
-  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
-    assert(is_young(), "we can only skip BOT updates on young regions");
-    return ContiguousSpace::allocate(word_size);
-  }
+  // All allocated blocks are occupied by objects in a HeapRegion
+  bool block_is_obj(const HeapWord* p) const;
+
+  // Returns the object size for all valid block starts
+  // and the amount of unallocated words if called on top()
+  size_t block_size(const HeapWord* p) const;
+
+  inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
+  inline HeapWord* allocate_no_bot_updates(size_t word_size);
 
   // If this region is a member of a HeapRegionSeq, the index in that
   // sequence, otherwise -1.
@@ -569,9 +585,6 @@
 
   HeapWord* orig_end() { return _orig_end; }
 
-  // Allows logical separation between objects allocated before and after.
-  void save_marks();
-
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space, bool locked = false);
   void par_clear();
@@ -580,10 +593,6 @@
   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
   HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
 
-  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
-  // allocated in the current region before the last call to "save_mark".
-  void oop_before_save_marks_iterate(ExtendedOopClosure* cl);
-
   // Note the start or end of marking. This tells the heap region
   // that the collector is about to start or has finished (concurrently)
   // marking the heap.
@@ -769,10 +778,6 @@
     _predicted_bytes_to_copy = bytes;
   }
 
-#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
-  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
-  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
-
   virtual CompactibleSpace* next_compaction_space() const;
 
   virtual void reset_after_compaction();
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -26,9 +26,48 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
 
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "memory/space.hpp"
+#include "runtime/atomic.inline.hpp"
+
+// This version requires locking.
+inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
+                                                HeapWord* const end_value) {
+  HeapWord* obj = top();
+  if (pointer_delta(end_value, obj) >= size) {
+    HeapWord* new_top = obj + size;
+    set_top(new_top);
+    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+    return obj;
+  } else {
+    return NULL;
+  }
+}
+
+// This version is lock-free.
+inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size,
+                                                    HeapWord* const end_value) {
+  do {
+    HeapWord* obj = top();
+    if (pointer_delta(end_value, obj) >= size) {
+      HeapWord* new_top = obj + size;
+      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+      // result can be one of two:
+      //  the old top value: the exchange succeeded
+      //  otherwise: the new value of the top is returned.
+      if (result == obj) {
+        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+        return obj;
+      }
+    } else {
+      return NULL;
+    }
+  } while (true);
+}
 
 inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
-  HeapWord* res = ContiguousSpace::allocate(size);
+  HeapWord* res = allocate_impl(size, end());
   if (res != NULL) {
     _offsets.alloc_block(res, size);
   }
@@ -40,12 +79,7 @@
 // this is used for larger LAB allocations only.
 inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
   MutexLocker x(&_par_alloc_lock);
-  // Given that we take the lock no need to use par_allocate() here.
-  HeapWord* res = ContiguousSpace::allocate(size);
-  if (res != NULL) {
-    _offsets.alloc_block(res, size);
-  }
-  return res;
+  return allocate(size);
 }
 
 inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
@@ -57,6 +91,41 @@
   return _offsets.block_start_const(p);
 }
 
+inline bool
+HeapRegion::block_is_obj(const HeapWord* p) const {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  return !g1h->is_obj_dead(oop(p), this);
+}
+
+inline size_t
+HeapRegion::block_size(const HeapWord *addr) const {
+  // Old regions' dead objects may have dead classes
+  // We need to find the next live object in some other
+  // manner than getting the oop size
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  if (g1h->is_obj_dead(oop(addr), this)) {
+    HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
+        getNextMarkedWordAddress(addr, prev_top_at_mark_start());
+
+    assert(next > addr, "must get the next live object");
+
+    return pointer_delta(next, addr);
+  } else if (addr == top()) {
+    return pointer_delta(end(), addr);
+  }
+  return oop(addr)->size();
+}
+
+inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
+  assert(is_young(), "we can only skip BOT updates on young regions");
+  return par_allocate_impl(word_size, end());
+}
+
+inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
+  assert(is_young(), "we can only skip BOT updates on young regions");
+  return allocate_impl(word_size, end());
+}
+
 inline void HeapRegion::note_start_of_marking() {
   _next_marked_bytes = 0;
   _next_top_at_mark_start = top();
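
par_allocate_impl() above is a textbook lock-free bump-pointer allocator: read the current top, compute the new top, and try to install it with a compare-and-swap, retrying with the freshly observed value whenever another thread wins the race. A minimal standalone equivalent follows, using std::atomic, bytes instead of HeapWords, and a fixed global buffer standing in for the region.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

char  g_buffer[1 << 20];                     // the "region" being allocated from
char* const g_end = g_buffer + sizeof(g_buffer);
std::atomic<char*> g_top{g_buffer};          // shared allocation top

// Lock-free bump-pointer allocation; returns nullptr when the region is full.
char* par_allocate(std::size_t size) {
  char* obj = g_top.load(std::memory_order_relaxed);
  do {
    if ((std::size_t)(g_end - obj) < size) return nullptr;
    // compare_exchange_weak reloads 'obj' with the current top on failure,
    // so the loop simply retries with the freshly observed value.
  } while (!g_top.compare_exchange_weak(obj, obj + size,
                                        std::memory_order_relaxed));
  return obj;
}

int main() {
  std::vector<std::thread> workers;
  std::atomic<int> allocations{0};
  for (int t = 0; t < 4; t++) {
    workers.emplace_back([&] {
      while (par_allocate(64) != nullptr) allocations.fetch_add(1);
    });
  }
  for (auto& w : workers) w.join();
  std::printf("objects allocated: %d (expected %zu)\n",
              allocations.load(), sizeof(g_buffer) / 64);
  return 0;
}
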
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -931,7 +931,10 @@
 
 void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
   assert(nm != NULL, "sanity");
-  _code_roots.remove(nm);
+  assert_locked_or_safepoint(CodeCache_lock);
+
+  _code_roots.remove_lock_free(nm);
+
   // Check that there were no duplicates
   guarantee(!_code_roots.contains(nm), "duplicate entry found");
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -285,37 +285,6 @@
   _par_closures[i] = par_closure;
 }
 
-void SATBMarkQueueSet::iterate_closure_all_threads() {
-  for(JavaThread* t = Threads::first(); t; t = t->next()) {
-    t->satb_mark_queue().apply_closure_and_empty(_closure);
-  }
-  shared_satb_queue()->apply_closure_and_empty(_closure);
-}
-
-void SATBMarkQueueSet::par_iterate_closure_all_threads(uint worker) {
-  SharedHeap* sh = SharedHeap::heap();
-  int parity = sh->strong_roots_parity();
-
-  for(JavaThread* t = Threads::first(); t; t = t->next()) {
-    if (t->claim_oops_do(true, parity)) {
-      t->satb_mark_queue().apply_closure_and_empty(_par_closures[worker]);
-    }
-  }
-
-  // We also need to claim the VMThread so that its parity is updated
-  // otherwise the next call to Thread::possibly_parallel_oops_do inside
-  // a StrongRootsScope might skip the VMThread because it has a stale
-  // parity that matches the parity set by the StrongRootsScope
-  //
-  // Whichever worker succeeds in claiming the VMThread gets to do
-  // the shared queue.
-
-  VMThread* vmt = VMThread::vm_thread();
-  if (vmt->claim_oops_do(true, parity)) {
-    shared_satb_queue()->apply_closure_and_empty(_par_closures[worker]);
-  }
-}
-
 bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
                                                               uint worker) {
   BufferNode* nd = NULL;
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -33,7 +33,9 @@
 
 // A ptrQueue whose elements are "oops", pointers to object heads.
 class ObjPtrQueue: public PtrQueue {
+  friend class Threads;
   friend class SATBMarkQueueSet;
+  friend class G1RemarkThreadsClosure;
 
 private:
   // Filter out unwanted entries from the buffer.
@@ -119,13 +121,6 @@
   // closures, one for each parallel GC thread.
   void set_par_closure(int i, ObjectClosure* closure);
 
-  // Apply the registered closure to all entries on each
-  // currently-active buffer and then empty the buffer. It should only
-  // be called serially and at a safepoint.
-  void iterate_closure_all_threads();
-  // Parallel version of the above.
-  void par_iterate_closure_all_threads(uint worker);
-
   // If there exists some completed buffer, pop it, then apply the
   // registered closure to all its elements, and return true.  If no
   // completed buffers exist, return false.
--- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -34,6 +34,8 @@
   static_field(HeapRegion, GrainBytes,        size_t)                         \
   static_field(HeapRegion, LogOfHRGrainBytes, int)                            \
                                                                               \
+  nonstatic_field(G1OffsetTableContigSpace, _top,       HeapWord*)            \
+                                                                              \
   nonstatic_field(G1HeapRegionTable, _base,             address)              \
   nonstatic_field(G1HeapRegionTable, _length,           size_t)               \
   nonstatic_field(G1HeapRegionTable, _biased_base,      address)              \
@@ -69,7 +71,8 @@
                                                                               \
   declare_type(G1CollectedHeap, SharedHeap)                                   \
                                                                               \
-  declare_type(HeapRegion, ContiguousSpace)                                   \
+  declare_type(G1OffsetTableContigSpace, CompactibleSpace)                    \
+  declare_type(HeapRegion, G1OffsetTableContigSpace)                          \
   declare_toplevel_type(HeapRegionSeq)                                        \
   declare_toplevel_type(HeapRegionSetBase)                                    \
   declare_toplevel_type(HeapRegionSetCount)                                   \
--- a/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,657 +0,0 @@
-/*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
-#include "gc_implementation/parNew/asParNewGeneration.hpp"
-#include "gc_implementation/parNew/parNewGeneration.hpp"
-#include "gc_implementation/shared/markSweep.inline.hpp"
-#include "gc_implementation/shared/spaceDecorator.hpp"
-#include "memory/defNewGeneration.inline.hpp"
-#include "memory/referencePolicy.hpp"
-#include "oops/markOop.inline.hpp"
-#include "oops/oop.pcgc.inline.hpp"
-
-ASParNewGeneration::ASParNewGeneration(ReservedSpace rs,
-                                       size_t initial_byte_size,
-                                       size_t min_byte_size,
-                                       int level) :
-  ParNewGeneration(rs, initial_byte_size, level),
-  _min_gen_size(min_byte_size) {}
-
-const char* ASParNewGeneration::name() const {
-  return "adaptive size par new generation";
-}
-
-void ASParNewGeneration::adjust_desired_tenuring_threshold() {
-  assert(UseAdaptiveSizePolicy,
-    "Should only be used with UseAdaptiveSizePolicy");
-}
-
-void ASParNewGeneration::resize(size_t eden_size, size_t survivor_size) {
-  // Resize the generation if needed. If the generation resize
-  // reports false, do not attempt to resize the spaces.
-  if (resize_generation(eden_size, survivor_size)) {
-    // Then we lay out the spaces inside the generation
-    resize_spaces(eden_size, survivor_size);
-
-    space_invariants();
-
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("Young generation size: "
-        "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
-        " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
-        " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
-        eden_size, survivor_size, used(), capacity(),
-        max_gen_size(), min_gen_size());
-    }
-  }
-}
-
-size_t ASParNewGeneration::available_to_min_gen() {
-  assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");
-  return virtual_space()->committed_size() - min_gen_size();
-}
-
-// This method assumes that from-space has live data and that
-// any shrinkage of the young gen is limited by location of
-// from-space.
-size_t ASParNewGeneration::available_to_live() const {
-#undef SHRINKS_AT_END_OF_EDEN
-#ifdef SHRINKS_AT_END_OF_EDEN
-  size_t delta_in_survivor = 0;
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  const size_t space_alignment = heap->intra_heap_alignment();
-  const size_t gen_alignment = heap->object_heap_alignment();
-
-  MutableSpace* space_shrinking = NULL;
-  if (from_space()->end() > to_space()->end()) {
-    space_shrinking = from_space();
-  } else {
-    space_shrinking = to_space();
-  }
-
-  // Include any space that is committed but not included in
-  // the survivor spaces.
-  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
-    "Survivor space beyond high end");
-  size_t unused_committed = pointer_delta(virtual_space()->high(),
-    space_shrinking->end(), sizeof(char));
-
-  if (space_shrinking->is_empty()) {
-    // Don't let the space shrink to 0
-    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
-      "Space is too small");
-    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
-  } else {
-    delta_in_survivor = pointer_delta(space_shrinking->end(),
-                                      space_shrinking->top(),
-                                      sizeof(char));
-  }
-
-  size_t delta_in_bytes = unused_committed + delta_in_survivor;
-  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
-  return delta_in_bytes;
-#else
-  // The only space available for shrinking is in to-space if it
-  // is above from-space.
-  if (to()->bottom() > from()->bottom()) {
-    const size_t alignment = os::vm_page_size();
-    if (to()->capacity() < alignment) {
-      return 0;
-    } else {
-      return to()->capacity() - alignment;
-    }
-  } else {
-    return 0;
-  }
-#endif
-}
-
-// Return the number of bytes available for resizing down the young
-// generation.  This is the minimum of
-//      input "bytes"
-//      bytes to the minimum young gen size
-//      bytes to the size currently being used + some small extra
-size_t ASParNewGeneration::limit_gen_shrink (size_t bytes) {
-  // Allow shrinkage into the current eden but keep eden large enough
-  // to maintain the minimum young gen size
-  bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
-  return align_size_down(bytes, os::vm_page_size());
-}
-
-// Note that the alignment used is the OS page size as
-// opposed to an alignment associated with the virtual space
-// (as is done in the ASPSYoungGen/ASPSOldGen)
-bool ASParNewGeneration::resize_generation(size_t eden_size,
-                                           size_t survivor_size) {
-  const size_t alignment = os::vm_page_size();
-  size_t orig_size = virtual_space()->committed_size();
-  bool size_changed = false;
-
-  // The following guarantee used to be here.
-  // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
-  // Code below forces this requirement.  In addition the desired eden
-  // size and desired survivor sizes are desired goals and may
-  // exceed the total generation size.
-
-  assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(),
-    "just checking");
-
-  // Adjust new generation size
-  const size_t eden_plus_survivors =
-          align_size_up(eden_size + 2 * survivor_size, alignment);
-  size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_gen_size()),
-                             min_gen_size());
-  assert(desired_size <= max_gen_size(), "just checking");
-
-  if (desired_size > orig_size) {
-    // Grow the generation
-    size_t change = desired_size - orig_size;
-    assert(change % alignment == 0, "just checking");
-    if (expand(change)) {
-      return false; // Error if we fail to resize!
-    }
-    size_changed = true;
-  } else if (desired_size < orig_size) {
-    size_t desired_change = orig_size - desired_size;
-    assert(desired_change % alignment == 0, "just checking");
-
-    desired_change = limit_gen_shrink(desired_change);
-
-    if (desired_change > 0) {
-      virtual_space()->shrink_by(desired_change);
-      reset_survivors_after_shrink();
-
-      size_changed = true;
-    }
-  } else {
-    if (Verbose && PrintGC) {
-      if (orig_size == max_gen_size()) {
-        gclog_or_tty->print_cr("ASParNew generation size at maximum: "
-          SIZE_FORMAT "K", orig_size/K);
-      } else if (orig_size == min_gen_size()) {
-        gclog_or_tty->print_cr("ASParNew generation size at minimum: "
-          SIZE_FORMAT "K", orig_size/K);
-      }
-    }
-  }
-
-  if (size_changed) {
-    MemRegion cmr((HeapWord*)virtual_space()->low(),
-                  (HeapWord*)virtual_space()->high());
-    GenCollectedHeap::heap()->barrier_set()->resize_covered_region(cmr);
-
-    if (Verbose && PrintGC) {
-      size_t current_size  = virtual_space()->committed_size();
-      gclog_or_tty->print_cr("ASParNew generation size changed: "
-                             SIZE_FORMAT "K->" SIZE_FORMAT "K",
-                             orig_size/K, current_size/K);
-    }
-  }
-
-  guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
-            virtual_space()->committed_size() == max_gen_size(), "Sanity");
-
-  return true;
-}
-
-void ASParNewGeneration::reset_survivors_after_shrink() {
-
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  HeapWord* new_end = (HeapWord*)virtual_space()->high();
-
-  if (from()->end() > to()->end()) {
-    assert(new_end >= from()->end(), "Shrinking past from-space");
-  } else {
-    assert(new_end >= to()->bottom(), "Shrink was too large");
-    // Was there a shrink of the survivor space?
-    if (new_end < to()->end()) {
-      MemRegion mr(to()->bottom(), new_end);
-      to()->initialize(mr,
-                       SpaceDecorator::DontClear,
-                       SpaceDecorator::DontMangle);
-    }
-  }
-}
-void ASParNewGeneration::resize_spaces(size_t requested_eden_size,
-                                       size_t requested_survivor_size) {
-  assert(UseAdaptiveSizePolicy, "sanity check");
-  assert(requested_eden_size > 0  && requested_survivor_size > 0,
-         "just checking");
-  CollectedHeap* heap = Universe::heap();
-  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Sanity");
-
-
-  // We require eden and to space to be empty
-  if ((!eden()->is_empty()) || (!to()->is_empty())) {
-    return;
-  }
-
-  size_t cur_eden_size = eden()->capacity();
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("ASParNew::resize_spaces(requested_eden_size: "
-                  SIZE_FORMAT
-                  ", requested_survivor_size: " SIZE_FORMAT ")",
-                  requested_eden_size, requested_survivor_size);
-    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
-                  SIZE_FORMAT,
-                  p2i(eden()->bottom()),
-                  p2i(eden()->end()),
-                  pointer_delta(eden()->end(),
-                                eden()->bottom(),
-                                sizeof(char)));
-    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
-                  SIZE_FORMAT,
-                  p2i(from()->bottom()),
-                  p2i(from()->end()),
-                  pointer_delta(from()->end(),
-                                from()->bottom(),
-                                sizeof(char)));
-    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
-                  SIZE_FORMAT,
-                  p2i(to()->bottom()),
-                  p2i(to()->end()),
-                  pointer_delta(  to()->end(),
-                                  to()->bottom(),
-                                  sizeof(char)));
-  }
-
-  // There's nothing to do if the new sizes are the same as the current
-  if (requested_survivor_size == to()->capacity() &&
-      requested_survivor_size == from()->capacity() &&
-      requested_eden_size == eden()->capacity()) {
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    capacities are the right sizes, returning");
-    }
-    return;
-  }
-
-  char* eden_start = (char*)eden()->bottom();
-  char* eden_end   = (char*)eden()->end();
-  char* from_start = (char*)from()->bottom();
-  char* from_end   = (char*)from()->end();
-  char* to_start   = (char*)to()->bottom();
-  char* to_end     = (char*)to()->end();
-
-  const size_t alignment = os::vm_page_size();
-  const bool maintain_minimum =
-    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
-
-  // Check whether from space is below to space
-  if (from_start < to_start) {
-    // Eden, from, to
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("  Eden, from, to:");
-    }
-
-    // Set eden
-    // "requested_eden_size" is a goal for the size of eden
-    // and may not be attainable.  "eden_size" below is
-    // calculated based on the location of from-space and
-    // the goal for the size of eden.  from-space is
-    // fixed in place because it contains live data.
-    // The calculation is done this way to avoid 32bit
-    // overflow (i.e., eden_start + requested_eden_size
-    // may be too large for representation in 32 bits).
-    size_t eden_size;
-    if (maintain_minimum) {
-      // Only make eden larger than the requested size if
-      // the minimum size of the generation has to be maintained.
-      // This could be done in general but policy at a higher
-      // level is determining a requested size for eden and that
-      // should be honored unless there is a fundamental reason.
-      eden_size = pointer_delta(from_start,
-                                eden_start,
-                                sizeof(char));
-    } else {
-      eden_size = MIN2(requested_eden_size,
-                       pointer_delta(from_start, eden_start, sizeof(char)));
-    }
-
-    eden_size = align_size_down(eden_size, alignment);
-    eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed");
-
-    // To may resize into from space as long as it is clear of live data.
-    // From space must remain page aligned, though, so we need to do some
-    // extra calculations.
-
-    // First calculate an optimal to-space
-    to_end   = (char*)virtual_space()->high();
-    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
-                                    sizeof(char));
-
-    // Does the optimal to-space overlap from-space?
-    if (to_start < (char*)from()->end()) {
-      // Calculate the minimum offset possible for from_end
-      size_t from_size = pointer_delta(from()->top(), from_start, sizeof(char));
-
-      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
-      if (from_size == 0) {
-        from_size = alignment;
-      } else {
-        from_size = align_size_up(from_size, alignment);
-      }
-
-      from_end = from_start + from_size;
-      assert(from_end > from_start, "addition overflow or from_size problem");
-
-      guarantee(from_end <= (char*)from()->end(), "from_end moved to the right");
-
-      // Now update to_start with the new from_end
-      to_start = MAX2(from_end, to_start);
-    } else {
-      // If shrinking, move to-space down to abut the end of from-space
-      // so that shrinking will move to-space down.  If not shrinking
-      // to-space is moving up to allow for growth on the next expansion.
-      if (requested_eden_size <= cur_eden_size) {
-        to_start = from_end;
-        if (to_start + requested_survivor_size > to_start) {
-          to_end = to_start + requested_survivor_size;
-        }
-      }
-      // else leave to_end pointing to the high end of the virtual space.
-    }
-
-    guarantee(to_start != to_end, "to space is zero sized");
-
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(eden_start),
-                    p2i(eden_end),
-                    pointer_delta(eden_end, eden_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [from_start .. from_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(from_start),
-                    p2i(from_end),
-                    pointer_delta(from_end, from_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(to_start),
-                    p2i(to_end),
-                    pointer_delta(  to_end,   to_start, sizeof(char)));
-    }
-  } else {
-    // Eden, to, from
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("  Eden, to, from:");
-    }
-
-    // Calculate the to-space boundaries based on
-    // the start of from-space.
-    to_end = from_start;
-    to_start = (char*)pointer_delta(from_start,
-                                    (char*)requested_survivor_size,
-                                    sizeof(char));
-    // Calculate the ideal eden boundaries.
-    // eden_end is already at the bottom of the generation
-    assert(eden_start == virtual_space()->low(),
-      "Eden is not starting at the low end of the virtual space");
-    if (eden_start + requested_eden_size >= eden_start) {
-      eden_end = eden_start + requested_eden_size;
-    } else {
-      eden_end = to_start;
-    }
-
-    // Does eden intrude into to-space?  to-space
-    // gets priority but eden is not allowed to shrink
-    // to 0.
-    if (eden_end > to_start) {
-      eden_end = to_start;
-    }
-
-    // Don't let eden shrink down to 0 or less.
-    eden_end = MAX2(eden_end, eden_start + alignment);
-    assert(eden_start + alignment >= eden_start, "Overflow");
-
-    size_t eden_size;
-    if (maintain_minimum) {
-      // Use all the space available.
-      eden_end = MAX2(eden_end, to_start);
-      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
-      eden_size = MIN2(eden_size, cur_eden_size);
-    } else {
-      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
-    }
-    eden_size = align_size_down(eden_size, alignment);
-    assert(maintain_minimum || eden_size <= requested_eden_size,
-      "Eden size is too large");
-    assert(eden_size >= alignment, "Eden size is too small");
-    eden_end = eden_start + eden_size;
-
-    // Move to-space down to eden.
-    if (requested_eden_size < cur_eden_size) {
-      to_start = eden_end;
-      if (to_start + requested_survivor_size > to_start) {
-        to_end = MIN2(from_start, to_start + requested_survivor_size);
-      } else {
-        to_end = from_start;
-      }
-    }
-
-    // eden_end may have moved so again make sure
-    // the to-space and eden don't overlap.
-    to_start = MAX2(eden_end, to_start);
-
-    // from-space
-    size_t from_used = from()->used();
-    if (requested_survivor_size > from_used) {
-      if (from_start + requested_survivor_size >= from_start) {
-        from_end = from_start + requested_survivor_size;
-      }
-      if (from_end > virtual_space()->high()) {
-        from_end = virtual_space()->high();
-      }
-    }
-
-    assert(to_start >= eden_end, "to-space should be above eden");
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(eden_start),
-                    p2i(eden_end),
-                    pointer_delta(eden_end, eden_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(to_start),
-                    p2i(to_end),
-                    pointer_delta(  to_end,   to_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [from_start .. from_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(from_start),
-                    p2i(from_end),
-                    pointer_delta(from_end, from_start, sizeof(char)));
-    }
-  }
-
-
-  guarantee((HeapWord*)from_start <= from()->bottom(),
-            "from start moved to the right");
-  guarantee((HeapWord*)from_end >= from()->top(),
-            "from end moved into live data");
-  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
-  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
-  assert(is_object_aligned((intptr_t)to_start), "checking alignment");
-
-  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
-  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
-  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);
-
-  // Let's make sure the call to initialize doesn't reset "top"!
-  HeapWord* old_from_top = from()->top();
-
-  // For PrintAdaptiveSizePolicy block  below
-  size_t old_from = from()->capacity();
-  size_t old_to   = to()->capacity();
-
-  // If not clearing the spaces, do some checking to verify that
-  // the spaces are already mangled.
-
-  // Must check mangling before the spaces are reshaped.  Otherwise,
-  // the bottom or end of one space may have moved into another, and
-  // a failure of the check may not correctly indicate which space
-  // is not properly mangled.
-  if (ZapUnusedHeapArea) {
-    HeapWord* limit = (HeapWord*) virtual_space()->high();
-    eden()->check_mangled_unused_area(limit);
-    from()->check_mangled_unused_area(limit);
-      to()->check_mangled_unused_area(limit);
-  }
-
-  // The call to initialize NULL's the next compaction space
-  eden()->initialize(edenMR,
-                     SpaceDecorator::Clear,
-                     SpaceDecorator::DontMangle);
-  eden()->set_next_compaction_space(from());
-    to()->initialize(toMR  ,
-                     SpaceDecorator::Clear,
-                     SpaceDecorator::DontMangle);
-  from()->initialize(fromMR,
-                     SpaceDecorator::DontClear,
-                     SpaceDecorator::DontMangle);
-
-  assert(from()->top() == old_from_top, "from top changed!");
-
-  if (PrintAdaptiveSizePolicy) {
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    assert(gch->kind() == CollectedHeap::GenCollectedHeap, "Sanity");
-
-    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
-                  "collection: %d "
-                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
-                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
-                  gch->total_collections(),
-                  old_from, old_to,
-                  from()->capacity(),
-                  to()->capacity());
-    gclog_or_tty->cr();
-  }
-}
-
-void ASParNewGeneration::compute_new_size() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "not a CMS generational heap");
-
-
-  CMSAdaptiveSizePolicy* size_policy =
-    (CMSAdaptiveSizePolicy*)gch->gen_policy()->size_policy();
-  assert(size_policy->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-
-  size_t survived = from()->used();
-  if (!survivor_overflow()) {
-    // Keep running averages on how much survived
-    size_policy->avg_survived()->sample(survived);
-  } else {
-    size_t promoted =
-      (size_t) next_gen()->gc_stats()->avg_promoted()->last_sample();
-    assert(promoted < gch->capacity(), "Conversion problem?");
-    size_t survived_guess = survived + promoted;
-    size_policy->avg_survived()->sample(survived_guess);
-  }
-
-  size_t survivor_limit = max_survivor_size();
-  _tenuring_threshold =
-    size_policy->compute_survivor_space_size_and_threshold(
-                                                     _survivor_overflow,
-                                                     _tenuring_threshold,
-                                                     survivor_limit);
-  size_policy->avg_young_live()->sample(used());
-  size_policy->avg_eden_live()->sample(eden()->used());
-
-  size_policy->compute_eden_space_size(eden()->capacity(), max_gen_size());
-
-  resize(size_policy->calculated_eden_size_in_bytes(),
-         size_policy->calculated_survivor_size_in_bytes());
-
-  if (UsePerfData) {
-    CMSGCAdaptivePolicyCounters* counters =
-      (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
-    assert(counters->kind() ==
-           GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
-      "Wrong kind of counters");
-    counters->update_tenuring_threshold(_tenuring_threshold);
-    counters->update_survivor_overflowed(_survivor_overflow);
-    counters->update_young_capacity(capacity());
-  }
-}
-
-
-#ifndef PRODUCT
-// Changes from PSYoungGen version
-//      value of "alignment"
-void ASParNewGeneration::space_invariants() {
-  const size_t alignment = os::vm_page_size();
-
-  // Currently, our eden size cannot shrink to zero
-  guarantee(eden()->capacity() >= alignment, "eden too small");
-  guarantee(from()->capacity() >= alignment, "from too small");
-  guarantee(to()->capacity() >= alignment, "to too small");
-
-  // Relationship of spaces to each other
-  char* eden_start = (char*)eden()->bottom();
-  char* eden_end   = (char*)eden()->end();
-  char* from_start = (char*)from()->bottom();
-  char* from_end   = (char*)from()->end();
-  char* to_start   = (char*)to()->bottom();
-  char* to_end     = (char*)to()->end();
-
-  guarantee(eden_start >= virtual_space()->low(), "eden bottom");
-  guarantee(eden_start < eden_end, "eden space consistency");
-  guarantee(from_start < from_end, "from space consistency");
-  guarantee(to_start < to_end, "to space consistency");
-
-  // Check whether from space is below to space
-  if (from_start < to_start) {
-    // Eden, from, to
-    guarantee(eden_end <= from_start, "eden/from boundary");
-    guarantee(from_end <= to_start,   "from/to boundary");
-    guarantee(to_end <= virtual_space()->high(), "to end");
-  } else {
-    // Eden, to, from
-    guarantee(eden_end <= to_start, "eden/to boundary");
-    guarantee(to_end <= from_start, "to/from boundary");
-    guarantee(from_end <= virtual_space()->high(), "from end");
-  }
-
-  // More checks that the virtual space is consistent with the spaces
-  assert(virtual_space()->committed_size() >=
-    (eden()->capacity() +
-     to()->capacity() +
-     from()->capacity()), "Committed size is inconsistent");
-  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
-    "Space invariant");
-  char* eden_top = (char*)eden()->top();
-  char* from_top = (char*)from()->top();
-  char* to_top = (char*)to()->top();
-  assert(eden_top <= virtual_space()->high(), "eden top");
-  assert(from_top <= virtual_space()->high(), "from top");
-  assert(to_top <= virtual_space()->high(), "to top");
-}
-#endif
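
The resize_generation() logic deleted above boils down to one sizing rule: round the requested eden plus two survivor spaces up to the page size, then clamp the result between the generation's minimum and maximum committed size. Below is a minimal standalone sketch of just that rule; align_up, desired_committed_size and the sample sizes are hypothetical stand-ins for HotSpot's align_size_up, the MAX2/MIN2 clamp and os::vm_page_size(), not the removed code itself.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Round value up to the next multiple of alignment (alignment must be > 0).
static size_t align_up(size_t value, size_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

// Desired committed size for the young generation: eden plus two survivors,
// page-aligned, then clamped into [min_gen, max_gen].
static size_t desired_committed_size(size_t eden, size_t survivor,
                                     size_t min_gen, size_t max_gen,
                                     size_t alignment) {
  const size_t eden_plus_survivors = align_up(eden + 2 * survivor, alignment);
  return std::max(std::min(eden_plus_survivors, max_gen), min_gen);
}

int main() {
  const size_t desired = desired_committed_size(64u << 20, 8u << 20,   // eden, survivor
                                                32u << 20, 256u << 20, // min, max gen size
                                                4096);                 // page size
  std::printf("desired committed size: %zu bytes\n", desired);
  return 0;
}
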
--- a/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_ASPARNEWGENERATION_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_ASPARNEWGENERATION_HPP
-
-#include "gc_implementation/parNew/parNewGeneration.hpp"
-#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
-
-// A Generation that does parallel young-gen collection extended
-// for adaptive size policy.
-
-// Division of generation into spaces
-// done by DefNewGeneration::compute_space_boundaries()
-//      +---------------+
-//      | uncommitted   |
-//      |---------------|
-//      | ss0           |
-//      |---------------|
-//      | ss1           |
-//      |---------------|
-//      |               |
-//      | eden          |
-//      |               |
-//      +---------------+       <-- low end of VirtualSpace
-//
-class ASParNewGeneration: public ParNewGeneration {
-
-  size_t _min_gen_size;
-
-  // Resize the generation based on the desired sizes of
-  // the constituent spaces.
-  bool resize_generation(size_t eden_size, size_t survivor_size);
-  // Resize the spaces based on their desired sizes but
-  // respecting the maximum size of the generation.
-  void resize_spaces(size_t eden_size, size_t survivor_size);
-  // Return the byte size remaining to the minimum generation size.
-  size_t available_to_min_gen();
-  // Return the byte size remaining to the live data in the generation.
-  size_t available_to_live() const;
-  // Return the byte size that the generation is allowed to shrink.
-  size_t limit_gen_shrink(size_t bytes);
-  // Reset the size of the spaces after a shrink of the generation.
-  void reset_survivors_after_shrink();
-
-  // Accessor
-  VirtualSpace* virtual_space() { return &_virtual_space; }
-
-  virtual void adjust_desired_tenuring_threshold();
-
- public:
-
-  ASParNewGeneration(ReservedSpace rs,
-                     size_t initial_byte_size,
-                     size_t min_byte_size,
-                     int level);
-
-  virtual const char* short_name() const { return "ASParNew"; }
-  virtual const char* name() const;
-  virtual Generation::Name kind() { return ASParNew; }
-
-  // Change the sizes of eden and the survivor spaces in
-  // the generation.  The parameters are desired sizes
-  // and are not guaranteed to be met.  For example, they cannot
-  // be met if their total is larger than the generation.
-  void resize(size_t eden_size, size_t survivor_size);
-
-  virtual void compute_new_size();
-
-  size_t max_gen_size()                 { return _reserved.byte_size(); }
-  size_t min_gen_size() const           { return _min_gen_size; }
-
-  // Space boundary invariant checker
-  void space_invariants() PRODUCT_RETURN;
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_ASPARNEWGENERATION_HPP
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -614,18 +614,21 @@
 
   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                       gch->rem_set()->klass_rem_set());
-
-  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;
+  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
+                                           &par_scan_state.to_space_root_closure(),
+                                           false);
 
   par_scan_state.start_strong_roots();
-  gch->gen_process_strong_roots(_gen->level(),
-                                true,  // Process younger gens, if any,
-                                       // as strong roots.
-                                false, // no scope; this is parallel code
-                                SharedHeap::ScanningOption(so),
-                                &par_scan_state.to_space_root_closure(),
-                                &par_scan_state.older_gen_closure(),
-                                &klass_scan_closure);
+  gch->gen_process_roots(_gen->level(),
+                         true,  // Process younger gens, if any,
+                                // as strong roots.
+                         false, // no scope; this is parallel code
+                         SharedHeap::SO_ScavengeCodeCache,
+                         GenCollectedHeap::StrongAndWeakRoots,
+                         &par_scan_state.to_space_root_closure(),
+                         &par_scan_state.older_gen_closure(),
+                         &cld_scan_closure);
+
   par_scan_state.end_strong_roots();
 
   // "evacuate followers".
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -69,7 +69,7 @@
   ParScanWithoutBarrierClosure         _to_space_closure; // scan_without_gc_barrier
   ParScanWithBarrierClosure            _old_gen_closure; // scan_with_gc_barrier
   ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
-  // One of these two will be passed to process_strong_roots, which will
+  // One of these two will be passed to process_roots, which will
   // set its generation.  The first is for two-gen configs where the
   // old gen collects the perm gen; the second is for arbitrary configs.
   // The second isn't used right now (it used to be used for the train, an
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,21 +30,6 @@
 #include "runtime/os.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "services/memTracker.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 bool
 ParMarkBitMap::initialize(MemRegion covered_region)
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -59,7 +59,7 @@
 
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
   CLDToOopClosure mark_and_push_from_clds(&mark_and_push_closure, true);
-  CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true);
+  MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
 
   if (_java_thread != NULL)
     _java_thread->oops_do(
@@ -100,7 +100,7 @@
     case threads:
     {
       ResourceMark rm;
-      CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true);
+      MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
       CLDToOopClosure mark_and_push_from_cld(&mark_and_push_closure);
       Threads::oops_do(&mark_and_push_closure, &mark_and_push_from_cld, &each_active_code_blob);
     }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -536,14 +536,14 @@
     Universe::oops_do(mark_and_push_closure());
     JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
     CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
-    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
+    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
     Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
     ObjectSynchronizer::oops_do(mark_and_push_closure());
     FlatProfiler::oops_do(mark_and_push_closure());
     Management::oops_do(mark_and_push_closure());
     JvmtiExport::oops_do(mark_and_push_closure());
     SystemDictionary::always_strong_oops_do(mark_and_push_closure());
-    ClassLoaderDataGraph::always_strong_oops_do(mark_and_push_closure(), follow_klass_closure(), true);
+    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
     // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
     //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
   }
@@ -633,16 +633,16 @@
   FlatProfiler::oops_do(adjust_pointer_closure());
   Management::oops_do(adjust_pointer_closure());
   JvmtiExport::oops_do(adjust_pointer_closure());
-  // SO_AllClasses
   SystemDictionary::oops_do(adjust_pointer_closure());
-  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
+  ClassLoaderDataGraph::cld_do(adjust_cld_closure());
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
 
-  CodeCache::oops_do(adjust_pointer_closure());
+  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
+  CodeCache::blobs_do(&adjust_from_blobs);
   StringTable::oops_do(adjust_pointer_closure());
   ref_processor()->weak_oops_do(adjust_pointer_closure());
   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -40,11 +40,11 @@
   static CollectorCounters*  _counters;
 
   // Closure accessors
-  static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; }
-  static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; }
-  static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
-  static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
-  static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; }
+  static OopClosure* mark_and_push_closure()   { return &MarkSweep::mark_and_push_closure; }
+  static VoidClosure* follow_stack_closure()   { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
+  static CLDClosure* follow_cld_closure()      { return &MarkSweep::follow_cld_closure; }
+  static OopClosure* adjust_pointer_closure()  { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
+  static CLDClosure* adjust_cld_closure()      { return &MarkSweep::adjust_cld_closure; }
   static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; }
 
  debug_only(public:)  // Used for PSParallelCompact debugging
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -2474,7 +2474,6 @@
   FlatProfiler::oops_do(adjust_pointer_closure());
   Management::oops_do(adjust_pointer_closure());
   JvmtiExport::oops_do(adjust_pointer_closure());
-  // SO_AllClasses
   SystemDictionary::oops_do(adjust_pointer_closure());
   ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
 
@@ -2483,7 +2482,8 @@
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
 
-  CodeCache::oops_do(adjust_pointer_closure());
+  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
+  CodeCache::blobs_do(&adjust_from_blobs);
   StringTable::oops_do(adjust_pointer_closure());
   ref_processor()->weak_oops_do(adjust_pointer_closure());
   // Roots were visited so references into the young gen in roots
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -100,7 +100,7 @@
 
     case code_cache:
       {
-        CodeBlobToOopClosure each_scavengable_code_blob(&roots_to_old_closure, /*do_marking=*/ true);
+        MarkingCodeBlobClosure each_scavengable_code_blob(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
         CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
       }
       break;
@@ -123,7 +123,7 @@
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
   CLDClosure* roots_from_clds = NULL;  // Not needed. All CLDs are already visited.
-  CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true);
+  MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations);
 
   if (_java_thread != NULL)
     _java_thread->oops_do(&roots_closure, roots_from_clds, &roots_in_blobs);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -26,21 +26,6 @@
 #include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
 #include "runtime/os.hpp"
 #include "runtime/virtualspace.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
--- a/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -507,7 +507,7 @@
     // always fail (never do the print based on the interval test).
     return PrintGCDetails &&
            UseAdaptiveSizePolicy &&
-           (UseParallelGC || UseConcMarkSweepGC) &&
+           UseParallelGC &&
            (AdaptiveSizePolicyOutputInterval > 0) &&
            ((count == 0) ||
              ((count % AdaptiveSizePolicyOutputInterval) == 0));
--- a/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -134,7 +134,7 @@
     Threads::add(res);
     Thread::start(res);
   }
-  os::yield(); // This seems to help with initial start-up of SLT
+  os::naked_yield(); // This seems to help with initial start-up of SLT
   return res;
 }
 
--- a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -216,16 +216,4 @@
   bool increment_will_decrease();
 };
 
-class GCPauseTimer : StackObj {
-  elapsedTimer* _timer;
- public:
-  GCPauseTimer(elapsedTimer* timer) {
-    _timer = timer;
-    _timer->stop();
-  }
-  ~GCPauseTimer() {
-    _timer->start();
-  }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCUTIL_HPP
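
The GCPauseTimer removed above is a small scope guard: its constructor stops an elapsedTimer and its destructor restarts it, so whatever runs while the guard is alive is excluded from the measurement. A self-contained sketch of the same idiom, assuming a hypothetical Timer class in place of HotSpot's elapsedTimer:

#include <chrono>
#include <cstdio>

class Timer {
  using clock = std::chrono::steady_clock;
  clock::time_point _started{};
  clock::duration _accumulated = clock::duration::zero();
  bool _running = false;
 public:
  void start() { _started = clock::now(); _running = true; }
  void stop() {
    if (_running) { _accumulated += clock::now() - _started; }
    _running = false;
  }
  double seconds() const {
    return std::chrono::duration<double>(_accumulated).count();
  }
};

// Pauses the timer for the lifetime of the guard object (RAII).
class PauseTimer {
  Timer* _timer;
 public:
  explicit PauseTimer(Timer* timer) : _timer(timer) { _timer->stop(); }
  ~PauseTimer() { _timer->start(); }
};

int main() {
  Timer timer;
  timer.start();
  {
    PauseTimer pause(&timer);   // this scope is excluded from the measurement
  }
  timer.stop();
  std::printf("measured: %f seconds\n", timer.seconds());
  return 0;
}
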
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -54,21 +54,14 @@
 void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
 
 MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
-MarkSweep::FollowKlassClosure MarkSweep::follow_klass_closure;
-MarkSweep::AdjustKlassClosure MarkSweep::adjust_klass_closure;
+CLDToOopClosure               MarkSweep::follow_cld_closure(&mark_and_push_closure);
+CLDToOopClosure               MarkSweep::adjust_cld_closure(&adjust_pointer_closure);
 
 void MarkSweep::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(p); }
 void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
 
-void MarkSweep::FollowKlassClosure::do_klass(Klass* klass) {
-  klass->oops_do(&MarkSweep::mark_and_push_closure);
-}
-void MarkSweep::AdjustKlassClosure::do_klass(Klass* klass) {
-  klass->oops_do(&MarkSweep::adjust_pointer_closure);
-}
-
 void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
-  cld->oops_do(&MarkSweep::mark_and_push_closure, &MarkSweep::follow_klass_closure, true);
+  MarkSweep::follow_cld_closure.do_cld(cld);
 }
 
 void MarkSweep::follow_stack() {
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -65,17 +65,6 @@
     virtual void do_oop(narrowOop* p);
   };
 
-  // The one and only place to start following the classes.
-  // Should only be applied to the ClassLoaderData klasses list.
-  class FollowKlassClosure : public KlassClosure {
-   public:
-    void do_klass(Klass* klass);
-  };
-  class AdjustKlassClosure : public KlassClosure {
-   public:
-    void do_klass(Klass* klass);
-  };
-
   class FollowStackClosure: public VoidClosure {
    public:
     virtual void do_void();
@@ -144,10 +133,10 @@
   static IsAliveClosure       is_alive;
   static FollowRootClosure    follow_root_closure;
   static MarkAndPushClosure   mark_and_push_closure;
-  static FollowKlassClosure   follow_klass_closure;
   static FollowStackClosure   follow_stack_closure;
+  static CLDToOopClosure      follow_cld_closure;
   static AdjustPointerClosure adjust_pointer_closure;
-  static AdjustKlassClosure   adjust_klass_closure;
+  static CLDToOopClosure      adjust_cld_closure;
 
   // Accessors
   static uint total_invocations() { return _total_invocations; }
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -195,6 +195,43 @@
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
 }
 
+bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
+#if INCLUDE_ALL_GCS
+  if (UseConcMarkSweepGC || UseG1GC) {
+    if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
+      MetaspaceGC::set_should_concurrent_collect(true);
+    } else if (UseG1GC) {
+      G1CollectedHeap* g1h = G1CollectedHeap::heap();
+      g1h->g1_policy()->set_initiate_conc_mark_if_possible();
+
+      GCCauseSetter x(g1h, _gc_cause);
+
+      // At this point we are supposed to start a concurrent cycle. We
+      // will do so if one is not already in progress.
+      bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
+
+      if (should_start) {
+        double pause_target = g1h->g1_policy()->max_pause_time_ms();
+        g1h->do_collection_pause_at_safepoint(pause_target);
+      }
+    }
+
+    return true;
+  }
+#endif
+  return false;
+}
+
+static void log_metaspace_alloc_failure_for_concurrent_GC() {
+  if (Verbose && PrintGCDetails) {
+    if (UseConcMarkSweepGC) {
+      gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
+    } else if (UseG1GC) {
+      gclog_or_tty->print_cr("\nG1 full GC for Metaspace");
+    }
+  }
+}
+
 void VM_CollectForMetadataAllocation::doit() {
   SvcGCMarker sgcm(SvcGCMarker::FULL);
 
@@ -206,54 +243,57 @@
   // a GC that freed space for the allocation.
   if (!MetadataAllocationFailALot) {
     _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
-  }
-
-  if (_result == NULL) {
-    if (UseConcMarkSweepGC) {
-      if (CMSClassUnloadingEnabled) {
-        MetaspaceGC::set_should_concurrent_collect(true);
-      }
-      // For CMS expand since the collection is going to be concurrent.
-      _result =
-        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
-    }
-    if (_result == NULL) {
-      // Don't clear the soft refs yet.
-      if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
-        gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
-      }
-      heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
-      // After a GC try to allocate without expanding.  Could fail
-      // and expansion will be tried below.
-      _result =
-        _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
-    }
-    if (_result == NULL) {
-      // If still failing, allow the Metaspace to expand.
-      // See delta_capacity_until_GC() for explanation of the
-      // amount of the expansion.
-      // This should work unless there really is no more space
-      // or a MaxMetaspaceSize has been specified on the command line.
-      _result =
-        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
-      if (_result == NULL) {
-        // If expansion failed, do a last-ditch collection and try allocating
-        // again.  A last-ditch collection will clear softrefs.  This
-        // behavior is similar to the last-ditch collection done for perm
-        // gen when it was full and a collection for failed allocation
-        // did not free perm gen space.
-        heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
-        _result =
-          _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
-      }
-    }
-    if (Verbose && PrintGCDetails && _result == NULL) {
-      gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
-                             SIZE_FORMAT, _size);
+    if (_result != NULL) {
+      return;
     }
   }
 
-  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
+  if (initiate_concurrent_GC()) {
+    // For CMS and G1 expand since the collection is going to be concurrent.
+    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
+    if (_result != NULL) {
+      return;
+    }
+
+    log_metaspace_alloc_failure_for_concurrent_GC();
+  }
+
+  // Don't clear the soft refs yet.
+  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
+  // After a GC try to allocate without expanding.  Could fail
+  // and expansion will be tried below.
+  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+  if (_result != NULL) {
+    return;
+  }
+
+  // If still failing, allow the Metaspace to expand.
+  // See delta_capacity_until_GC() for explanation of the
+  // amount of the expansion.
+  // This should work unless there really is no more space
+  // or a MaxMetaspaceSize has been specified on the command line.
+  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
+  if (_result != NULL) {
+    return;
+  }
+
+  // If expansion failed, do a last-ditch collection and try allocating
+  // again.  A last-ditch collection will clear softrefs.  This
+  // behavior is similar to the last-ditch collection done for perm
+  // gen when it was full and a collection for failed allocation
+  // did not free perm gen space.
+  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
+  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+  if (_result != NULL) {
+    return;
+  }
+
+  if (Verbose && PrintGCDetails) {
+    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
+                           SIZE_FORMAT, _size);
+  }
+
+  if (GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
 }
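
The rewritten doit() above replaces the nested "if (_result == NULL)" ladder with a flat sequence of attempts that returns as soon as an allocation succeeds. The sketch below shows only that control-flow shape; ToyArena and its methods are hypothetical stand-ins for the Metaspace allocate/expand_and_allocate calls and the two collections, not the HotSpot API, and it omits the check that the first expansion is only done when a concurrent collector will reclaim the space.

#include <cstddef>
#include <cstdio>

struct ToyArena {
  size_t capacity = 16;
  size_t used = 0;
  // Try to allocate without growing; fails when the arena is full.
  bool allocate(size_t size) {
    if (used + size > capacity) return false;
    used += size;
    return true;
  }
  // Grow by the requested amount, then retry the allocation.
  bool expand_and_allocate(size_t size) {
    capacity += size;
    return allocate(size);
  }
  // Stand-ins for the ordinary and last-ditch collections.
  void collect()            { used = used >= 4 ? used - 4 : 0; }
  void last_ditch_collect() { used = 0; }
};

static bool allocate_with_retries(ToyArena& arena, size_t size) {
  if (arena.allocate(size)) return true;             // plain attempt
  if (arena.expand_and_allocate(size)) return true;  // expand, then retry
  arena.collect();                                   // ordinary collection
  if (arena.allocate(size)) return true;
  if (arena.expand_and_allocate(size)) return true;  // allow expansion again
  arena.last_ditch_collect();                        // clears soft references
  return arena.allocate(size);                       // may still fail
}

int main() {
  ToyArena arena;
  std::printf("allocation %s\n", allocate_with_retries(arena, 24) ? "succeeded" : "failed");
  return 0;
}
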
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -217,6 +217,8 @@
   virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
   virtual void doit();
   MetaWord* result() const       { return _result; }
+
+  bool initiate_concurrent_GC();
 };
 
 class SvcGCMarker : public StackObj {
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1093,6 +1093,7 @@
 address SignatureHandlerLibrary::set_handler_blob() {
   BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
   if (handler_blob == NULL) {
+    CompileBroker::handle_full_code_cache();
     return NULL;
   }
   address handler = handler_blob->code_begin();
--- a/hotspot/src/share/vm/interpreter/oopMapCache.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/oopMapCache.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -180,7 +180,7 @@
   }
 }
 
-bool InterpreterOopMap::is_empty() {
+bool InterpreterOopMap::is_empty() const {
   bool result = _method == NULL;
   assert(_method != NULL || (_bci == 0 &&
     (_mask_size == 0 || _mask_size == USHRT_MAX) &&
@@ -196,7 +196,7 @@
   for (int i = 0; i < N; i++) _bit_mask[i] = 0;
 }
 
-void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) {
+void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) const {
   int n = number_of_entries();
   int word_index = 0;
   uintptr_t value = 0;
@@ -238,7 +238,7 @@
 #endif
 
 
-void InterpreterOopMap::print() {
+void InterpreterOopMap::print() const {
   int n = number_of_entries();
   tty->print("oop map for ");
   method()->print_value();
@@ -469,7 +469,7 @@
   }
 }
 
-inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) {
+inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) const {
   // We use method->code_size() rather than method->identity_hash() below since
   // the mark may not be present if a pointer to the method is already reversed.
   return   ((unsigned int) bci)
@@ -522,7 +522,7 @@
 
 void OopMapCache::lookup(methodHandle method,
                          int bci,
-                         InterpreterOopMap* entry_for) {
+                         InterpreterOopMap* entry_for) const {
   MutexLocker x(&_mut);
 
   OopMapCacheEntry* entry = NULL;
--- a/hotspot/src/share/vm/interpreter/oopMapCache.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/oopMapCache.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -101,32 +101,31 @@
 
   // access methods
   Method*        method() const                  { return _method; }
-  void           set_method(Method* v)         { _method = v; }
+  void           set_method(Method* v)           { _method = v; }
   int            bci() const                     { return _bci; }
   void           set_bci(int v)                  { _bci = v; }
   int            mask_size() const               { return _mask_size; }
   void           set_mask_size(int v)            { _mask_size = v; }
-  int            number_of_entries() const       { return mask_size() / bits_per_entry; }
   // Test bit mask size and return either the in-line bit mask or allocated
   // bit mask.
-  uintptr_t*  bit_mask()                         { return (uintptr_t*)(mask_size() <= small_mask_limit ? (intptr_t)_bit_mask : _bit_mask[0]); }
+  uintptr_t*  bit_mask() const                   { return (uintptr_t*)(mask_size() <= small_mask_limit ? (intptr_t)_bit_mask : _bit_mask[0]); }
 
   // return the word size of _bit_mask.  mask_size() <= 4 * MAX_USHORT
-  size_t mask_word_size() {
+  size_t mask_word_size() const {
     return (mask_size() + BitsPerWord - 1) / BitsPerWord;
   }
 
-  uintptr_t entry_at(int offset)            { int i = offset * bits_per_entry; return bit_mask()[i / BitsPerWord] >> (i % BitsPerWord); }
+  uintptr_t entry_at(int offset) const           { int i = offset * bits_per_entry; return bit_mask()[i / BitsPerWord] >> (i % BitsPerWord); }
 
-  void set_expression_stack_size(int sz)    { _expression_stack_size = sz; }
+  void set_expression_stack_size(int sz)         { _expression_stack_size = sz; }
 
 #ifdef ENABLE_ZAP_DEAD_LOCALS
-  bool is_dead(int offset)                       { return (entry_at(offset) & (1 << dead_bit_number)) != 0; }
+  bool is_dead(int offset) const                 { return (entry_at(offset) & (1 << dead_bit_number)) != 0; }
 #endif
 
   // Lookup
-  bool match(methodHandle method, int bci)       { return _method == method() && _bci == bci; }
-  bool is_empty();
+  bool match(methodHandle method, int bci) const { return _method == method() && _bci == bci; }
+  bool is_empty() const;
 
   // Initialization
   void initialize();
@@ -141,12 +140,13 @@
   // in-line), allocate the space from a Resource area.
   void resource_copy(OopMapCacheEntry* from);
 
-  void iterate_oop(OffsetClosure* oop_closure);
-  void print();
+  void iterate_oop(OffsetClosure* oop_closure) const;
+  void print() const;
 
-  bool is_oop  (int offset)                      { return (entry_at(offset) & (1 << oop_bit_number )) != 0; }
+  int number_of_entries() const                  { return mask_size() / bits_per_entry; }
+  bool is_oop (int offset) const                 { return (entry_at(offset) & (1 << oop_bit_number )) != 0; }
 
-  int expression_stack_size()                    { return _expression_stack_size; }
+  int expression_stack_size() const              { return _expression_stack_size; }
 
 #ifdef ENABLE_ZAP_DEAD_LOCALS
   void iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure);
@@ -161,10 +161,10 @@
 
   OopMapCacheEntry* _array;
 
-  unsigned int hash_value_for(methodHandle method, int bci);
+  unsigned int hash_value_for(methodHandle method, int bci) const;
   OopMapCacheEntry* entry_at(int i) const;
 
-  Mutex _mut;
+  mutable Mutex _mut;
 
   void flush();
 
@@ -177,7 +177,7 @@
 
   // Returns the oopMap for (method, bci) in parameter "entry".
   // Returns false if an oop map was not found.
-  void lookup(methodHandle method, int bci, InterpreterOopMap* entry);
+  void lookup(methodHandle method, int bci, InterpreterOopMap* entry) const;
 
   // Compute an oop map without updating the cache or grabbing any locks (for debugging)
   static void compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry);
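
The const-qualification added in this hunk depends on one detail worth spelling out: lookup() can be declared const even though it acquires a lock, because _mut is declared mutable. A generic illustration of that pattern, using std::mutex rather than HotSpot's Mutex (Cache, insert and lookup are made-up names):

#include <cstdio>
#include <mutex>
#include <string>
#include <unordered_map>
#include <utility>

class Cache {
  mutable std::mutex _mut;   // mutable: may be locked from const member functions
  std::unordered_map<int, std::string> _map;
 public:
  void insert(int key, std::string value) {
    std::lock_guard<std::mutex> guard(_mut);
    _map[key] = std::move(value);
  }
  // Logically const: the observable state is unchanged, only the mutex is used.
  bool lookup(int key, std::string* out) const {
    std::lock_guard<std::mutex> guard(_mut);
    auto it = _map.find(key);
    if (it == _map.end()) return false;
    *out = it->second;
    return true;
  }
};

int main() {
  Cache cache;
  cache.insert(1, "oop map");
  std::string value;
  std::printf("found: %s\n", cache.lookup(1, &value) ? value.c_str() : "(none)");
  return 0;
}
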
--- a/hotspot/src/share/vm/memory/allocation.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -36,22 +36,6 @@
 #include "services/memTracker.hpp"
 #include "utilities/ostream.hpp"
 
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
-
 void* StackObj::operator new(size_t size)     throw() { ShouldNotCallThis(); return 0; }
 void  StackObj::operator delete(void* p)              { ShouldNotCallThis(); }
 void* StackObj::operator new [](size_t size)  throw() { ShouldNotCallThis(); return 0; }
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -429,7 +429,7 @@
                                                                  OopsInGenClosure* cl,
                                                                  CardTableRS* ct) {
   if (!mr.is_empty()) {
-    // Caller (process_strong_roots()) claims that all GC threads
+    // Caller (process_roots()) claims that all GC threads
     // execute this call.  With UseDynamicNumberOfGCThreads now all
     // active GC threads execute this call.  The number of active GC
     // threads needs to be passed to par_non_clean_card_iterate_work()
@@ -438,7 +438,7 @@
     // This is an example of where n_par_threads() is used instead
     // of workers()->active_workers().  n_par_threads can be set to 0 to
     // turn off parallelism.  For example when this code is called as
-    // part of verification and SharedHeap::process_strong_roots() is being
+    // part of verification and SharedHeap::process_roots() is being
     // used, then n_par_threads() may have been set to 0.  active_workers
     // is not overloaded with the meaning that it is a switch to disable
     // parallelism and so keeps the meaning of the number of
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -40,10 +40,6 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
-#endif // INCLUDE_ALL_GCS
 
 // CollectorPolicy methods
 
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -115,7 +115,6 @@
     CollectorPolicyKind,
     GenCollectorPolicyKind,
     ConcurrentMarkSweepPolicyKind,
-    ASConcurrentMarkSweepPolicyKind,
     G1CollectorPolicyKind
   };
 
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -614,6 +614,9 @@
 
   KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                       gch->rem_set()->klass_rem_set());
+  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
+                                           &fsc_with_no_gc_barrier,
+                                           false);
 
   set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
   FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
@@ -623,16 +626,15 @@
   assert(gch->no_allocs_since_save_marks(0),
          "save marks have not been newly set.");
 
-  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;
-
-  gch->gen_process_strong_roots(_level,
-                                true,  // Process younger gens, if any,
-                                       // as strong roots.
-                                true,  // activate StrongRootsScope
-                                SharedHeap::ScanningOption(so),
-                                &fsc_with_no_gc_barrier,
-                                &fsc_with_gc_barrier,
-                                &klass_scan_closure);
+  gch->gen_process_roots(_level,
+                         true,  // Process younger gens, if any,
+                                // as strong roots.
+                         true,  // activate StrongRootsScope
+                         SharedHeap::SO_ScavengeCodeCache,
+                         GenCollectedHeap::StrongAndWeakRoots,
+                         &fsc_with_no_gc_barrier,
+                         &fsc_with_gc_barrier,
+                         &cld_scan_closure);
 
   // "evacuate followers".
   evacuate_followers.do_void();
--- a/hotspot/src/share/vm/memory/gcLocker.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/gcLocker.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -29,19 +29,6 @@
 #include "memory/genCollectedHeap.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.hpp"
-#include "runtime/thread.inline.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 // The direct lock/unlock calls do not force a collection if an unlock
 // decrements the count to zero. Avoid calling these if at all possible.
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -61,8 +61,8 @@
 GenCollectedHeap* GenCollectedHeap::_gch;
 NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
 
-// The set of potentially parallel tasks in strong root scanning.
-enum GCH_process_strong_roots_tasks {
+// The set of potentially parallel tasks in root scanning.
+enum GCH_strong_roots_tasks {
   // We probably want to parallelize both of these internally, but for now...
   GCH_PS_younger_gens,
   // Leave this one last.
@@ -72,11 +72,11 @@
 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
   SharedHeap(policy),
   _gen_policy(policy),
-  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
+  _gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
   _full_collections_completed(0)
 {
-  if (_gen_process_strong_tasks == NULL ||
-      !_gen_process_strong_tasks->valid()) {
+  if (_gen_process_roots_tasks == NULL ||
+      !_gen_process_roots_tasks->valid()) {
     vm_exit_during_initialization("Failed necessary allocation.");
   }
   assert(policy != NULL, "Sanity check");
@@ -202,13 +202,11 @@
   guarantee(policy->is_generation_policy(), "Illegal policy type");
   DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
   assert(def_new_gen->kind() == Generation::DefNew ||
-         def_new_gen->kind() == Generation::ParNew ||
-         def_new_gen->kind() == Generation::ASParNew,
+         def_new_gen->kind() == Generation::ParNew,
          "Wrong generation kind");
 
   Generation* old_gen = get_gen(1);
   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
-         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
          old_gen->kind() == Generation::MarkSweepCompact,
     "Wrong generation kind");
 
@@ -573,9 +571,6 @@
     }
   }
 
-  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
-  AdaptiveSizePolicyOutput(sp, total_collections());
-
   print_heap_after_gc();
 
 #ifdef TRACESPINNING
@@ -589,24 +584,29 @@
 
 void GenCollectedHeap::set_par_threads(uint t) {
   SharedHeap::set_par_threads(t);
-  _gen_process_strong_tasks->set_n_threads(t);
+  _gen_process_roots_tasks->set_n_threads(t);
 }
 
 void GenCollectedHeap::
-gen_process_strong_roots(int level,
-                         bool younger_gens_as_roots,
-                         bool activate_scope,
-                         SharedHeap::ScanningOption so,
-                         OopsInGenClosure* not_older_gens,
-                         OopsInGenClosure* older_gens,
-                         KlassClosure* klass_closure) {
-  // General strong roots.
+gen_process_roots(int level,
+                  bool younger_gens_as_roots,
+                  bool activate_scope,
+                  SharedHeap::ScanningOption so,
+                  OopsInGenClosure* not_older_gens,
+                  OopsInGenClosure* weak_roots,
+                  OopsInGenClosure* older_gens,
+                  CLDClosure* cld_closure,
+                  CLDClosure* weak_cld_closure,
+                  CodeBlobClosure* code_closure) {
 
-  SharedHeap::process_strong_roots(activate_scope, so,
-                                   not_older_gens, klass_closure);
+  // General roots.
+  SharedHeap::process_roots(activate_scope, so,
+                            not_older_gens, weak_roots,
+                            cld_closure, weak_cld_closure,
+                            code_closure);
 
   if (younger_gens_as_roots) {
-    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
+    if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
       for (int i = 0; i < level; i++) {
         not_older_gens->set_generation(_gens[i]);
         _gens[i]->oop_iterate(not_older_gens);
@@ -622,7 +622,38 @@
     older_gens->reset_generation();
   }
 
-  _gen_process_strong_tasks->all_tasks_completed();
+  _gen_process_roots_tasks->all_tasks_completed();
+}
+
+void GenCollectedHeap::
+gen_process_roots(int level,
+                  bool younger_gens_as_roots,
+                  bool activate_scope,
+                  SharedHeap::ScanningOption so,
+                  bool only_strong_roots,
+                  OopsInGenClosure* not_older_gens,
+                  OopsInGenClosure* older_gens,
+                  CLDClosure* cld_closure) {
+
+  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
+
+  bool is_moving_collection = false;
+  if (level == 0 || is_adjust_phase) {
+    // young collections are always moving
+    is_moving_collection = true;
+  }
+
+  MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
+  CodeBlobClosure* code_closure = &mark_code_closure;
+
+  gen_process_roots(level,
+                    younger_gens_as_roots,
+                    activate_scope, so,
+                    not_older_gens, only_strong_roots ? NULL : not_older_gens,
+                    older_gens,
+                    cld_closure, only_strong_roots ? NULL : cld_closure,
+                    code_closure);
+
 }
 
 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
@@ -724,8 +755,7 @@
 #if INCLUDE_ALL_GCS
 bool GenCollectedHeap::create_cms_collector() {
 
-  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
-         (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
+  assert(_gens[1]->kind() == Generation::ConcurrentMarkSweep,
          "Unexpected generation kinds");
   // Skip two header words in the block content verification
   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -78,9 +78,9 @@
   unsigned int _full_collections_completed;
 
   // Data structure for claiming the (potentially) parallel tasks in
-  // (gen-specific) strong roots processing.
-  SubTasksDone* _gen_process_strong_tasks;
-  SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; }
+  // (gen-specific) roots processing.
+  SubTasksDone* _gen_process_roots_tasks;
+  SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; }
 
   // In block contents verification, the number of header words to skip
   NOT_PRODUCT(static size_t _skip_header_HeapWords;)
@@ -403,18 +403,30 @@
   // The "so" argument determines which of the roots
   // the closure is applied to:
   // "SO_None" does none;
-  // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
-  // "SO_SystemClasses" to all the "system" classes and loaders;
-  // "SO_Strings" applies the closure to all entries in the StringTable.
-  void gen_process_strong_roots(int level,
-                                bool younger_gens_as_roots,
-                                // The remaining arguments are in an order
-                                // consistent with SharedHeap::process_strong_roots:
-                                bool activate_scope,
-                                SharedHeap::ScanningOption so,
-                                OopsInGenClosure* not_older_gens,
-                                OopsInGenClosure* older_gens,
-                                KlassClosure* klass_closure);
+ private:
+  void gen_process_roots(int level,
+                         bool younger_gens_as_roots,
+                         bool activate_scope,
+                         SharedHeap::ScanningOption so,
+                         OopsInGenClosure* not_older_gens,
+                         OopsInGenClosure* weak_roots,
+                         OopsInGenClosure* older_gens,
+                         CLDClosure* cld_closure,
+                         CLDClosure* weak_cld_closure,
+                         CodeBlobClosure* code_closure);
+
+ public:
+  static const bool StrongAndWeakRoots = false;
+  static const bool StrongRootsOnly    = true;
+
+  void gen_process_roots(int level,
+                         bool younger_gens_as_roots,
+                         bool activate_scope,
+                         SharedHeap::ScanningOption so,
+                         bool only_strong_roots,
+                         OopsInGenClosure* not_older_gens,
+                         OopsInGenClosure* older_gens,
+                         CLDClosure* cld_closure);
 
   // Apply "root_closure" to all the weak roots of the system.
   // These include JNI weak roots, string table,
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -207,13 +207,14 @@
   // Need new claim bits before marking starts.
   ClassLoaderDataGraph::clear_claimed_marks();
 
-  gch->gen_process_strong_roots(level,
-                                false, // Younger gens are not roots.
-                                true,  // activate StrongRootsScope
-                                SharedHeap::SO_SystemClasses,
-                                &follow_root_closure,
-                                &follow_root_closure,
-                                &follow_klass_closure);
+  gch->gen_process_roots(level,
+                         false, // Younger gens are not roots.
+                         true,  // activate StrongRootsScope
+                         SharedHeap::SO_None,
+                         GenCollectedHeap::StrongRootsOnly,
+                         &follow_root_closure,
+                         &follow_root_closure,
+                         &follow_cld_closure);
 
   // Process reference objects found during marking
   {
@@ -291,13 +292,14 @@
   // are run.
   adjust_pointer_closure.set_orig_generation(gch->get_gen(level));
 
-  gch->gen_process_strong_roots(level,
-                                false, // Younger gens are not roots.
-                                true,  // activate StrongRootsScope
-                                SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
-                                &adjust_pointer_closure,
-                                &adjust_pointer_closure,
-                                &adjust_klass_closure);
+  gch->gen_process_roots(level,
+                         false, // Younger gens are not roots.
+                         true,  // activate StrongRootsScope
+                         SharedHeap::SO_AllCodeCache,
+                         GenCollectedHeap::StrongAndWeakRoots,
+                         &adjust_pointer_closure,
+                         &adjust_pointer_closure,
+                         &adjust_cld_closure);
 
   gch->gen_process_weak_roots(&adjust_pointer_closure);
 
--- a/hotspot/src/share/vm/memory/generation.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/generation.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -154,8 +154,7 @@
 
 DefNewGeneration* Generation::as_DefNewGeneration() {
   assert((kind() == Generation::DefNew) ||
-         (kind() == Generation::ParNew) ||
-         (kind() == Generation::ASParNew),
+         (kind() == Generation::ParNew),
     "Wrong youngest generation type");
   return (DefNewGeneration*) this;
 }
--- a/hotspot/src/share/vm/memory/generation.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/generation.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -131,8 +131,6 @@
  public:
   // The set of possible generation kinds.
   enum Name {
-    ASParNew,
-    ASConcurrentMarkSweep,
     DefNew,
     ParNew,
     MarkSweepCompact,
--- a/hotspot/src/share/vm/memory/generationSpec.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/generationSpec.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -32,7 +32,6 @@
 #include "runtime/java.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/parNew/asParNewGeneration.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #endif // INCLUDE_ALL_GCS
@@ -50,12 +49,6 @@
     case Generation::ParNew:
       return new ParNewGeneration(rs, init_size(), level);
 
-    case Generation::ASParNew:
-      return new ASParNewGeneration(rs,
-                                    init_size(),
-                                    init_size() /* min size */,
-                                    level);
-
     case Generation::ConcurrentMarkSweep: {
       assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
       CardTableRS* ctrs = remset->as_CardTableRS();
@@ -75,26 +68,6 @@
 
       return g;
     }
-
-    case Generation::ASConcurrentMarkSweep: {
-      assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
-      CardTableRS* ctrs = remset->as_CardTableRS();
-      if (ctrs == NULL) {
-        vm_exit_during_initialization("Rem set incompatibility.");
-      }
-      // Otherwise
-      // The constructor creates the CMSCollector if needed,
-      // else registers with an existing CMSCollector
-
-      ASConcurrentMarkSweepGeneration* g = NULL;
-      g = new ASConcurrentMarkSweepGeneration(rs,
-                 init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
-                 (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
-
-      g->initialize_performance_counters();
-
-      return g;
-    }
 #endif // INCLUDE_ALL_GCS
 
     default:
--- a/hotspot/src/share/vm/memory/iterator.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/iterator.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -35,6 +35,10 @@
   cld->oops_do(_oop_closure, &_klass_closure, _must_claim_cld);
 }
 
+void CLDToKlassAndOopClosure::do_cld(ClassLoaderData* cld) {
+  cld->oops_do(_oop_closure, _klass_closure, _must_claim_cld);
+}
+
 void ObjectToOopClosure::do_object(oop obj) {
   obj->oop_iterate(_cl);
 }
@@ -43,6 +47,20 @@
   ShouldNotCallThis();
 }
 
+void CodeBlobToOopClosure::do_nmethod(nmethod* nm) {
+  nm->oops_do(_cl);
+  if (_fix_relocations) {
+    nm->fix_oop_relocations();
+  }
+}
+
+void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
+  nmethod* nm = cb->as_nmethod_or_null();
+  if (nm != NULL) {
+    do_nmethod(nm);
+  }
+}
+
 MarkingCodeBlobClosure::MarkScope::MarkScope(bool activate)
   : _active(activate)
 {
@@ -55,32 +73,7 @@
 
 void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
   nmethod* nm = cb->as_nmethod_or_null();
-  if (nm == NULL)  return;
-  if (!nm->test_set_oops_do_mark()) {
-    NOT_PRODUCT(if (TraceScavenge)  nm->print_on(tty, "oops_do, 1st visit\n"));
-    do_newly_marked_nmethod(nm);
-  } else {
-    NOT_PRODUCT(if (TraceScavenge)  nm->print_on(tty, "oops_do, skipped on 2nd visit\n"));
+  if (nm != NULL && !nm->test_set_oops_do_mark()) {
+    do_nmethod(nm);
   }
 }
-
-void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) {
-  nm->oops_do(_cl, /*allow_zombie=*/ false);
-}
-
-void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
-  if (!_do_marking) {
-    nmethod* nm = cb->as_nmethod_or_null();
-    NOT_PRODUCT(if (TraceScavenge && Verbose && nm != NULL)  nm->print_on(tty, "oops_do, unmarked visit\n"));
-    // This assert won't work, since there are lots of mini-passes
-    // (mostly in debug mode) that co-exist with marking phases.
-    //assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
-    if (nm != NULL) {
-      nm->oops_do(_cl);
-    }
-  } else {
-    MarkingCodeBlobClosure::do_code_blob(cb);
-  }
-}
-
-
--- a/hotspot/src/share/vm/memory/iterator.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/iterator.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -70,8 +70,8 @@
   //
   // Providing default implementations of the _nv functions unfortunately
   // removes the compile-time safeness, but reduces the clutter for the
-  // ExtendedOopClosures that don't need to walk the metadata. Currently,
-  // only CMS needs these.
+  // ExtendedOopClosures that don't need to walk the metadata.
+  // Currently, only CMS and G1 need these.
 
   virtual bool do_metadata() { return do_metadata_nv(); }
   bool do_metadata_v()       { return do_metadata(); }
@@ -126,15 +126,16 @@
     _oop_closure = oop_closure;
   }
 
-public:
+ public:
   KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}
+
   virtual void do_klass(Klass* k);
 };
 
 class CLDToOopClosure : public CLDClosure {
-  OopClosure* _oop_closure;
+  OopClosure*       _oop_closure;
   KlassToOopClosure _klass_closure;
-  bool _must_claim_cld;
+  bool              _must_claim_cld;
 
  public:
   CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
@@ -145,6 +146,23 @@
   void do_cld(ClassLoaderData* cld);
 };
 
+class CLDToKlassAndOopClosure : public CLDClosure {
+  friend class SharedHeap;
+  friend class G1CollectedHeap;
+ protected:
+  OopClosure*   _oop_closure;
+  KlassClosure* _klass_closure;
+  bool          _must_claim_cld;
+ public:
+  CLDToKlassAndOopClosure(KlassClosure* klass_closure,
+                          OopClosure* oop_closure,
+                          bool must_claim_cld) :
+                              _oop_closure(oop_closure),
+                              _klass_closure(klass_closure),
+                              _must_claim_cld(must_claim_cld) {}
+  void do_cld(ClassLoaderData* cld);
+};
+
 // The base class for all concurrent marking closures,
 // that participates in class unloading.
 // It's used to proxy through the metadata to the oops defined in them.
@@ -246,14 +264,26 @@
   virtual void do_code_blob(CodeBlob* cb) = 0;
 };
 
-
-class MarkingCodeBlobClosure : public CodeBlobClosure {
+// Applies an oop closure to all ref fields in code blobs
+// iterated over in an object iteration.
+class CodeBlobToOopClosure : public CodeBlobClosure {
+  OopClosure* _cl;
+  bool _fix_relocations;
+ protected:
+  void do_nmethod(nmethod* nm);
  public:
+  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
+  virtual void do_code_blob(CodeBlob* cb);
+
+  const static bool FixRelocations = true;
+};
+
+class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
+ public:
+  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}
   // Called for each code blob, but at most once per unique blob.
-  virtual void do_newly_marked_nmethod(nmethod* nm) = 0;
 
   virtual void do_code_blob(CodeBlob* cb);
-    // = { if (!nmethod(cb)->test_set_oops_do_mark())  do_newly_marked_nmethod(cb); }
 
   class MarkScope : public StackObj {
   protected:
@@ -266,23 +296,6 @@
   };
 };
 
-
-// Applies an oop closure to all ref fields in code blobs
-// iterated over in an object iteration.
-class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
-  OopClosure* _cl;
-  bool _do_marking;
-public:
-  virtual void do_newly_marked_nmethod(nmethod* cb);
-    // = { cb->oops_do(_cl); }
-  virtual void do_code_blob(CodeBlob* cb);
-    // = { if (_do_marking)  super::do_code_blob(cb); else cb->oops_do(_cl); }
-  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
-    : _cl(cl), _do_marking(do_marking) {}
-};
-
-
-
 // MonitorClosure is used for iterating over monitors in the monitors cache
 
 class ObjectMonitor;
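Note: the reshaped hierarchy above makes CodeBlobToOopClosure the base class; it narrows each CodeBlob to an nmethod, hands the nmethod's oops to the wrapped OopClosure, optionally fixes oop relocations, and exposes the boolean through the named constant FixRelocations so call sites stay readable. A rough standalone analogy of that filter-and-delegate shape, not the HotSpot classes themselves (Blob, Nmethod, OopVisitor and BlobToOopVisitor are stand-ins):

    #include <cstddef>
    #include <vector>

    struct Nmethod;                            // forward declaration

    struct Blob {                              // stands in for CodeBlob
      virtual ~Blob() {}
      virtual Nmethod* as_nmethod_or_null() { return nullptr; }
    };

    struct OopVisitor {                        // stands in for OopClosure
      virtual ~OopVisitor() {}
      virtual void do_oop(int* p) = 0;
    };

    struct Nmethod : Blob {                    // stands in for nmethod
      std::vector<int*> oops;                  // embedded references
      Nmethod* as_nmethod_or_null() { return this; }
    };

    // Filter-and-delegate closure: blobs that are not nmethods are ignored;
    // every oop of an nmethod is handed to the wrapped visitor.
    class BlobToOopVisitor {
      OopVisitor* _cl;
      bool        _fix_relocations;
     public:
      static const bool FixRelocations = true; // named constant for call sites
      BlobToOopVisitor(OopVisitor* cl, bool fix)
        : _cl(cl), _fix_relocations(fix) {}

      void do_blob(Blob* b) {
        Nmethod* nm = b->as_nmethod_or_null();
        if (nm == nullptr) return;
        for (std::size_t i = 0; i < nm->oops.size(); ++i) _cl->do_oop(nm->oops[i]);
        if (_fix_relocations) {
          // a real closure would patch oop relocations in the blob here
        }
      }
    };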
--- a/hotspot/src/share/vm/memory/metadataFactory.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/metadataFactory.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_MEMORY_METADATAFACTORY_HPP
 #define SHARE_VM_MEMORY_METADATAFACTORY_HPP
 
+#include "classfile/classLoaderData.hpp"
 #include "utilities/array.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/globalDefinitions.hpp"
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -35,6 +35,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/os.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/vm_operations.hpp"
 #include "runtime/vmThread.hpp"
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -29,6 +29,7 @@
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/sharedHeap.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/java.hpp"
 #include "services/management.hpp"
@@ -39,8 +40,8 @@
 
 SharedHeap* SharedHeap::_sh;
 
-// The set of potentially parallel tasks in strong root scanning.
-enum SH_process_strong_roots_tasks {
+// The set of potentially parallel tasks in root scanning.
+enum SH_process_roots_tasks {
   SH_PS_Universe_oops_do,
   SH_PS_JNIHandles_oops_do,
   SH_PS_ObjectSynchronizer_oops_do,
@@ -58,6 +59,7 @@
   CollectedHeap(),
   _collector_policy(policy_),
   _rem_set(NULL),
+  _strong_roots_scope(NULL),
   _strong_roots_parity(0),
   _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
   _workers(NULL)
@@ -114,6 +116,19 @@
 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
 #endif
 
+SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
+  return _strong_roots_scope;
+}
+void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
+  assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
+  assert(scope != NULL, "Illegal argument");
+  _strong_roots_scope = scope;
+}
+void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
+  assert(_strong_roots_scope == scope, "Wrong scope unregistered");
+  _strong_roots_scope = NULL;
+}
+
 void SharedHeap::change_strong_roots_parity() {
   // Also set the new collection parity.
   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
@@ -124,111 +139,160 @@
          "Not in range.");
 }
 
-SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
-  : MarkScope(activate)
+SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
+  : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
 {
   if (_active) {
-    outer->change_strong_roots_parity();
+    _sh->register_strong_roots_scope(this);
+    _sh->change_strong_roots_parity();
     // Zero the claimed high water mark in the StringTable
     StringTable::clear_parallel_claimed_index();
   }
 }
 
 SharedHeap::StrongRootsScope::~StrongRootsScope() {
-  // nothing particular
+  if (_active) {
+    _sh->unregister_strong_roots_scope(this);
+  }
+}
+
+Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
+
+void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
+  // The Thread work barrier is only needed by G1.
+  // No need to use the barrier if this is single-threaded code.
+  if (UseG1GC && n_workers > 0) {
+    uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
+    if (new_value == n_workers) {
+      // This thread is last. Notify the others.
+      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+      _lock->notify_all();
+    }
+  }
+}
+
+void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
+  // No need to use the barrier if this is single-threaded code.
+  if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
+    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+    while ((uint)_n_workers_done_with_threads != n_workers) {
+      _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
+    }
+  }
+}
+
+void SharedHeap::process_roots(bool activate_scope,
+                               ScanningOption so,
+                               OopClosure* strong_roots,
+                               OopClosure* weak_roots,
+                               CLDClosure* strong_cld_closure,
+                               CLDClosure* weak_cld_closure,
+                               CodeBlobClosure* code_roots) {
+  StrongRootsScope srs(this, activate_scope);
+
+  // General roots.
+  assert(_strong_roots_parity != 0, "must have called prologue code");
+  assert(code_roots != NULL, "code root closure should always be set");
+  // _n_termination for _process_strong_tasks should be set up stream
+  // in a method not running in a GC worker.  Otherwise the GC worker
+  // could be trying to change the termination condition while the task
+  // is executing in another GC worker.
+
+  // Iterating over the CLDG and the Threads are done early to allow G1 to
+  // first process the strong CLDs and nmethods and then, after a barrier,
+  // let the thread process the weak CLDs and nmethods.
+
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
+    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
+  }
+
+  // Some CLDs contained in the thread frames should be considered strong.
+  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
+  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
+  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
+  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
+
+  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
+
+  // This is the point where this worker thread will not find more strong CLDs/nmethods.
+  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
+  active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());
+
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
+    Universe::oops_do(strong_roots);
+  }
+  // Global (strong) JNI handles
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
+    JNIHandles::oops_do(strong_roots);
+
+  if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
+    ObjectSynchronizer::oops_do(strong_roots);
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
+    FlatProfiler::oops_do(strong_roots);
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
+    Management::oops_do(strong_roots);
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
+    JvmtiExport::oops_do(strong_roots);
+
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
+    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
+  }
+
+  // All threads execute the following. A specific chunk of buckets
+  // from the StringTable are the individual tasks.
+  if (weak_roots != NULL) {
+    if (CollectedHeap::use_parallel_gc_threads()) {
+      StringTable::possibly_parallel_oops_do(weak_roots);
+    } else {
+      StringTable::oops_do(weak_roots);
+    }
+  }
+
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
+    if (so & SO_ScavengeCodeCache) {
+      assert(code_roots != NULL, "must supply closure for code cache");
+
+      // We only visit parts of the CodeCache when scavenging.
+      CodeCache::scavenge_root_nmethods_do(code_roots);
+    }
+    if (so & SO_AllCodeCache) {
+      assert(code_roots != NULL, "must supply closure for code cache");
+
+      // CMSCollector uses this to do intermediate-strength collections.
+      // We scan the entire code cache, since CodeCache::do_unloading is not called.
+      CodeCache::blobs_do(code_roots);
+    }
+    // Verify that the code cache contents are not subject to
+    // movement by a scavenging collection.
+    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
+    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
+  }
+
+  _process_strong_tasks->all_tasks_completed();
+}
+
+void SharedHeap::process_all_roots(bool activate_scope,
+                                   ScanningOption so,
+                                   OopClosure* roots,
+                                   CLDClosure* cld_closure,
+                                   CodeBlobClosure* code_closure) {
+  process_roots(activate_scope, so,
+                roots, roots,
+                cld_closure, cld_closure,
+                code_closure);
 }
 
 void SharedHeap::process_strong_roots(bool activate_scope,
                                       ScanningOption so,
                                       OopClosure* roots,
-                                      KlassClosure* klass_closure) {
-  StrongRootsScope srs(this, activate_scope);
-
-  // General strong roots.
-  assert(_strong_roots_parity != 0, "must have called prologue code");
-  // _n_termination for _process_strong_tasks should be set up stream
-  // in a method not running in a GC worker.  Otherwise the GC worker
-  // could be trying to change the termination condition while the task
-  // is executing in another GC worker.
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
-    Universe::oops_do(roots);
-  }
-  // Global (strong) JNI handles
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
-    JNIHandles::oops_do(roots);
-
-  CodeBlobToOopClosure code_roots(roots, true);
-
-  CLDToOopClosure roots_from_clds(roots);
-  // If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to
-  // CLDs which are strongly reachable from the thread stacks.
-  CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL);
-  // All threads execute this; the individual threads are task groups.
-  if (CollectedHeap::use_parallel_gc_threads()) {
-    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, &code_roots);
-  } else {
-    Threads::oops_do(roots, roots_from_clds_p, &code_roots);
-  }
-
-  if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
-    ObjectSynchronizer::oops_do(roots);
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
-    FlatProfiler::oops_do(roots);
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
-    Management::oops_do(roots);
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
-    JvmtiExport::oops_do(roots);
+                                      CLDClosure* cld_closure,
+                                      CodeBlobClosure* code_closure) {
+  process_roots(activate_scope, so,
+                roots, NULL,
+                cld_closure, NULL,
+                code_closure);
+}
 
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
-    if (so & SO_AllClasses) {
-      SystemDictionary::oops_do(roots);
-    } else if (so & SO_SystemClasses) {
-      SystemDictionary::always_strong_oops_do(roots);
-    } else {
-      fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
-    }
-  }
-
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
-    if (so & SO_AllClasses) {
-      ClassLoaderDataGraph::oops_do(roots, klass_closure, /* must_claim */ false);
-    } else if (so & SO_SystemClasses) {
-      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, /* must_claim */ true);
-    }
-  }
-
-  // All threads execute the following. A specific chunk of buckets
-  // from the StringTable are the individual tasks.
-  if (so & SO_Strings) {
-    if (CollectedHeap::use_parallel_gc_threads()) {
-      StringTable::possibly_parallel_oops_do(roots);
-    } else {
-      StringTable::oops_do(roots);
-    }
-  }
-
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
-    if (so & SO_ScavengeCodeCache) {
-      assert(&code_roots != NULL, "must supply closure for code cache");
-
-      // We only visit parts of the CodeCache when scavenging.
-      CodeCache::scavenge_root_nmethods_do(&code_roots);
-    }
-    if (so & SO_AllCodeCache) {
-      assert(&code_roots != NULL, "must supply closure for code cache");
-
-      // CMSCollector uses this to do intermediate-strength collections.
-      // We scan the entire code cache, since CodeCache::do_unloading is not called.
-      CodeCache::blobs_do(&code_roots);
-    }
-    // Verify that the code cache contents are not subject to
-    // movement by a scavenging collection.
-    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
-    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
-  }
-
-  _process_strong_tasks->all_tasks_completed();
-}
 
 class AlwaysTrueClosure: public BoolObjectClosure {
 public:
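Note: process_roots() above now scans the ClassLoaderDataGraph and the thread stacks first, and each worker then calls mark_worker_done_with_threads(); G1 can block in wait_until_all_workers_done_with_threads() until every worker has passed that point before it processes the weak CLDs and nmethods. A minimal standalone sketch of that count-up-and-notify barrier, using the standard library instead of HotSpot's Monitor and Atomic (ThreadWorkBarrier, arrive and wait_for_all are invented names):

    #include <condition_variable>
    #include <mutex>

    // Count-up barrier: each worker calls arrive() once it is done with the
    // thread-stack part of root scanning; a coordinator (or any worker) can
    // wait_for_all() before starting the phase that depends on it.
    class ThreadWorkBarrier {
      std::mutex              _lock;
      std::condition_variable _cv;
      unsigned                _done = 0;
     public:
      void arrive(unsigned n_workers) {
        std::lock_guard<std::mutex> g(_lock);
        if (++_done == n_workers) {
          _cv.notify_all();               // last worker wakes the waiters
        }
      }
      void wait_for_all(unsigned n_workers) {
        std::unique_lock<std::mutex> g(_lock);
        _cv.wait(g, [&] { return _done >= n_workers; });
      }
    };

As in the patch, the real barrier is only armed when G1 is running with a non-zero number of workers; single-threaded callers skip it entirely.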
--- a/hotspot/src/share/vm/memory/sharedHeap.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/sharedHeap.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -69,14 +69,10 @@
 //    number of active GC workers.  CompactibleFreeListSpace and Space
 //    have SequentialSubTasksDone's.
 // Example of using SubTasksDone and SequentialSubTasksDone
-// G1CollectedHeap::g1_process_strong_roots() calls
-//  process_strong_roots(false, // no scoping; this is parallel code
-//                       is_scavenging, so,
-//                       &buf_scan_non_heap_roots,
-//                       &eager_scan_code_roots);
-//  which delegates to SharedHeap::process_strong_roots() and uses
+// G1CollectedHeap::g1_process_roots() delegates
+//  to SharedHeap::process_roots() and uses
 //  SubTasksDone* _process_strong_tasks to claim tasks.
-//  process_strong_roots() calls
+//  process_roots() calls
 //      rem_set()->younger_refs_iterate()
 //  to scan the card table and which eventually calls down into
 //  CardTableModRefBS::par_non_clean_card_iterate_work().  This method
@@ -182,12 +178,12 @@
   // task.  (This also means that a parallel thread may only call
   // process_strong_roots once.)
   //
-  // For calls to process_strong_roots by sequential code, the parity is
+  // For calls to process_roots by sequential code, the parity is
   // updated automatically.
   //
   // The idea is that objects representing fine-grained tasks, such as
   // threads, will contain a "parity" field.  A task will be claimed in the
-  // current "process_strong_roots" call only if its parity field is the
+  // current "process_roots" call only if its parity field is the
   // same as the "strong_roots_parity"; task claiming is accomplished by
   // updating the parity field to the strong_roots_parity with a CAS.
   //
@@ -198,27 +194,44 @@
   //   c) to never return a distinguished value (zero) with which such
   //      task-claiming variables may be initialized, to indicate "never
   //      claimed".
- private:
-  void change_strong_roots_parity();
  public:
   int strong_roots_parity() { return _strong_roots_parity; }
 
-  // Call these in sequential code around process_strong_roots.
+  // Call these in sequential code around process_roots.
   // strong_roots_prologue calls change_strong_roots_parity, if
   // parallel tasks are enabled.
   class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
-  public:
-    StrongRootsScope(SharedHeap* outer, bool activate = true);
+    // Used to implement the Thread work barrier.
+    static Monitor* _lock;
+
+    SharedHeap*   _sh;
+    volatile jint _n_workers_done_with_threads;
+
+   public:
+    StrongRootsScope(SharedHeap* heap, bool activate = true);
     ~StrongRootsScope();
+
+    // Mark that this thread is done with the Threads work.
+    void mark_worker_done_with_threads(uint n_workers);
+    // Wait until all n_workers are done with the Threads work.
+    void wait_until_all_workers_done_with_threads(uint n_workers);
   };
   friend class StrongRootsScope;
 
+  // The current active StrongRootsScope
+  StrongRootsScope* _strong_roots_scope;
+
+  StrongRootsScope* active_strong_roots_scope() const;
+
+ private:
+  void register_strong_roots_scope(StrongRootsScope* scope);
+  void unregister_strong_roots_scope(StrongRootsScope* scope);
+  void change_strong_roots_parity();
+
+ public:
   enum ScanningOption {
-    SO_None                = 0x0,
-    SO_AllClasses          = 0x1,
-    SO_SystemClasses       = 0x2,
-    SO_Strings             = 0x4,
-    SO_AllCodeCache        = 0x8,
+    SO_None                =  0x0,
+    SO_AllCodeCache        =  0x8,
     SO_ScavengeCodeCache   = 0x10
   };
 
@@ -227,15 +240,26 @@
   // Invoke the "do_oop" method the closure "roots" on all root locations.
   // The "so" argument determines which roots the closure is applied to:
   // "SO_None" does none;
-  // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
-  // "SO_SystemClasses" to all the "system" classes and loaders;
-  // "SO_Strings" applies the closure to all entries in StringTable;
   // "SO_AllCodeCache" applies the closure to all elements of the CodeCache.
   // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
+  void process_roots(bool activate_scope,
+                     ScanningOption so,
+                     OopClosure* strong_roots,
+                     OopClosure* weak_roots,
+                     CLDClosure* strong_cld_closure,
+                     CLDClosure* weak_cld_closure,
+                     CodeBlobClosure* code_roots);
+  void process_all_roots(bool activate_scope,
+                         ScanningOption so,
+                         OopClosure* roots,
+                         CLDClosure* cld_closure,
+                         CodeBlobClosure* code_roots);
   void process_strong_roots(bool activate_scope,
                             ScanningOption so,
                             OopClosure* roots,
-                            KlassClosure* klass_closure);
+                            CLDClosure* cld_closure,
+                            CodeBlobClosure* code_roots);
+
 
   // Apply "root_closure" to the JNI weak roots..
   void process_weak_roots(OopClosure* root_closure);
@@ -251,7 +275,7 @@
   virtual void gc_epilogue(bool full) = 0;
 
   // Sets the number of parallel threads that will be doing tasks
-  // (such as process strong roots) subsequently.
+  // (such as process roots) subsequently.
   virtual void set_par_threads(uint t);
 
   int n_termination();
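Note: the parity scheme described in the comments above lets several workers race over the same fine-grained tasks (thread stacks, for example) while each task is still handled exactly once per root-processing pass: the pass bumps a global parity that cycles through 1 and 2, never returning to the distinguished value 0 ("never claimed"), and a worker claims a task by CAS-ing the task's parity field to the current value. A standalone sketch of that claim protocol with std::atomic standing in for HotSpot's CAS (ClaimableTask, try_claim and next_parity are invented names):

    #include <atomic>

    // A task remembers the parity of the last pass that claimed it. Claiming
    // is a single compare-and-swap, so exactly one worker wins per pass.
    struct ClaimableTask {
      std::atomic<int> claimed_parity{0};    // 0 == never claimed

      bool try_claim(int current_parity) {
        int seen = claimed_parity.load(std::memory_order_relaxed);
        if (seen == current_parity) return false;   // already claimed this pass
        return claimed_parity.compare_exchange_strong(seen, current_parity);
      }
    };

    // Pass-global parity: cycles through 1 and 2, skipping the reserved 0.
    inline int next_parity(int p) { return p == 1 ? 2 : 1; }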
--- a/hotspot/src/share/vm/memory/space.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/space.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -685,14 +685,8 @@
 // This version requires locking.
 inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                 HeapWord* const end_value) {
-  // In G1 there are places where a GC worker can allocates into a
-  // region using this serial allocation code without being prone to a
-  // race with other GC workers (we ensure that no other GC worker can
-  // access the same region at the same time). So the assert below is
-  // too strong in the case of G1.
   assert(Heap_lock->owned_by_self() ||
-         (SafepointSynchronize::is_at_safepoint() &&
-                               (Thread::current()->is_VM_thread() || UseG1GC)),
+         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
          "not locked");
   HeapWord* obj = top();
   if (pointer_delta(end_value, obj) >= size) {
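Note: allocate_impl() above is the usual bump-pointer fast path: if the gap between top and the end of the space can hold the request, slide top forward and return the old value. A tiny standalone version of just that fast path (BumpArena and alloc_words are made-up names, HeapWord is reduced to a plain word type, and the real code additionally asserts that the caller holds the Heap_lock or is the VM thread at a safepoint, which is exactly the assert this hunk tightens):

    #include <cstddef>

    typedef unsigned long HeapWord;      // stand-in for HotSpot's HeapWord

    // Serial bump-pointer arena: allocation succeeds while [top, end) still
    // has room and simply slides top forward; no headers, no locking.
    class BumpArena {
      HeapWord* _top;
      HeapWord* _end;
     public:
      BumpArena(HeapWord* bottom, HeapWord* end) : _top(bottom), _end(end) {}

      HeapWord* alloc_words(std::size_t size) {
        HeapWord* obj = _top;
        if (static_cast<std::size_t>(_end - obj) >= size) {  // enough room?
          _top = obj + size;                                  // bump the pointer
          return obj;
        }
        return nullptr;                     // caller falls back to a slow path
      }
    };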
--- a/hotspot/src/share/vm/memory/universe.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -72,7 +72,7 @@
 #include "utilities/preserveException.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
+#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
@@ -802,13 +802,9 @@
       gc_policy = new MarkSweepPolicy();
     } else if (UseConcMarkSweepGC) {
 #if INCLUDE_ALL_GCS
-      if (UseAdaptiveSizePolicy) {
-        gc_policy = new ASConcurrentMarkSweepPolicy();
-      } else {
-        gc_policy = new ConcurrentMarkSweepPolicy();
-      }
+      gc_policy = new ConcurrentMarkSweepPolicy();
 #else  // INCLUDE_ALL_GCS
-    fatal("UseConcMarkSweepGC not supported in this VM.");
+      fatal("UseConcMarkSweepGC not supported in this VM.");
 #endif // INCLUDE_ALL_GCS
     } else { // default old generation
       gc_policy = new MarkSweepPolicy();
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -245,6 +245,7 @@
   set_static_oop_field_count(0);
   set_nonstatic_field_size(0);
   set_is_marked_dependent(false);
+  set_has_unloaded_dependent(false);
   set_init_state(InstanceKlass::allocated);
   set_init_thread(NULL);
   set_reference_type(rt);
@@ -1801,6 +1802,9 @@
   return id;
 }
 
+int nmethodBucket::decrement() {
+  return Atomic::add(-1, (volatile int *)&_count);
+}
 
 //
 // Walk the list of dependent nmethods searching for nmethods which
@@ -1815,7 +1819,7 @@
     nmethod* nm = b->get_nmethod();
     // since dependencies aren't removed until an nmethod becomes a zombie,
     // the dependency list may contain nmethods which aren't alive.
-    if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
+    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
       if (TraceDependencies) {
         ResourceMark rm;
         tty->print_cr("Marked for deoptimization");
@@ -1832,6 +1836,43 @@
   return found;
 }
 
+void InstanceKlass::clean_dependent_nmethods() {
+  assert_locked_or_safepoint(CodeCache_lock);
+
+  if (has_unloaded_dependent()) {
+    nmethodBucket* b = _dependencies;
+    nmethodBucket* last = NULL;
+    while (b != NULL) {
+      assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
+
+      nmethodBucket* next = b->next();
+
+      if (b->count() == 0) {
+        if (last == NULL) {
+          _dependencies = next;
+        } else {
+          last->set_next(next);
+        }
+        delete b;
+        // last stays the same.
+      } else {
+        last = b;
+      }
+
+      b = next;
+    }
+    set_has_unloaded_dependent(false);
+  }
+#ifdef ASSERT
+  else {
+    // Verification
+    for (nmethodBucket* b = _dependencies; b != NULL; b = b->next()) {
+      assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
+      assert(b->count() != 0, "empty buckets need to be cleaned");
+    }
+  }
+#endif
+}
 
 //
 // Add an nmethodBucket to the list of dependencies for this nmethod.
@@ -1866,13 +1907,10 @@
   nmethodBucket* last = NULL;
   while (b != NULL) {
     if (nm == b->get_nmethod()) {
-      if (b->decrement() == 0) {
-        if (last == NULL) {
-          _dependencies = b->next();
-        } else {
-          last->set_next(b->next());
-        }
-        delete b;
+      int val = b->decrement();
+      guarantee(val >= 0, err_msg("Underflow: %d", val));
+      if (val == 0) {
+        set_has_unloaded_dependent(true);
       }
       return;
     }
@@ -1911,6 +1949,10 @@
   nmethodBucket* b = _dependencies;
   while (b != NULL) {
     if (nm == b->get_nmethod()) {
+#ifdef ASSERT
+      int count = b->count();
+      assert(count >= 0, err_msg("count shouldn't be negative: %d", count));
+#endif
       return true;
     }
     b = b->next();
@@ -2209,7 +2251,7 @@
 #endif // INCLUDE_ALL_GCS
 
 void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
-  assert(is_loader_alive(is_alive), "this klass should be live");
+  assert(class_loader_data()->is_alive(is_alive), "this klass should be live");
   if (is_interface()) {
     if (ClassUnloading) {
       Klass* impl = implementor();
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -197,6 +197,7 @@
   // _is_marked_dependent can be set concurrently, thus cannot be part of the
   // _misc_flags.
   bool            _is_marked_dependent;  // used for marking during flushing and deoptimization
+  bool            _has_unloaded_dependent;
 
   enum {
     _misc_rewritten            = 1 << 0, // methods rewritten.
@@ -444,6 +445,9 @@
   bool is_marked_dependent() const         { return _is_marked_dependent; }
   void set_is_marked_dependent(bool value) { _is_marked_dependent = value; }
 
+  bool has_unloaded_dependent() const         { return _has_unloaded_dependent; }
+  void set_has_unloaded_dependent(bool value) { _has_unloaded_dependent = value; }
+
   // initialization (virtuals from Klass)
   bool should_be_initialized() const;  // means that initialize should be called
   void initialize(TRAPS);
@@ -922,6 +926,7 @@
 
   void clean_implementors_list(BoolObjectClosure* is_alive);
   void clean_method_data(BoolObjectClosure* is_alive);
+  void clean_dependent_nmethods();
 
   // Explicit metaspace deallocation of fields
   // For RedefineClasses and class file parsing errors, we need to deallocate
@@ -1210,7 +1215,7 @@
   }
   int count()                             { return _count; }
   int increment()                         { _count += 1; return _count; }
-  int decrement()                         { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
+  int decrement();
   nmethodBucket* next()                   { return _next; }
   void set_next(nmethodBucket* b)         { _next = b; }
   nmethod* get_nmethod()                  { return _nmethod; }
--- a/hotspot/src/share/vm/oops/klass.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/oops/klass.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -42,6 +42,7 @@
 #include "utilities/stack.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
@@ -159,7 +160,12 @@
   _primary_supers[0] = k;
   set_super_check_offset(in_bytes(primary_supers_offset()));
 
-  set_java_mirror(NULL);
+  // The constructor is used from init_self_patching_vtbl_list,
+  // which doesn't zero out the memory before calling the constructor.
+  // Need to set the field explicitly to not hit an assert that the field
+  // should be NULL before setting it.
+  _java_mirror = NULL;
+
   set_modifier_flags(0);
   set_layout_helper(Klass::_lh_neutral_value);
   set_name(NULL);
@@ -383,7 +389,7 @@
   return mirror_alive;
 }
 
-void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive) {
+void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses) {
   if (!ClassUnloading) {
     return;
   }
@@ -428,7 +434,7 @@
     }
 
     // Clean the implementors list and method data.
-    if (current->oop_is_instance()) {
+    if (clean_alive_klasses && current->oop_is_instance()) {
       InstanceKlass* ik = InstanceKlass::cast(current);
       ik->clean_implementors_list(is_alive);
       ik->clean_method_data(is_alive);
@@ -440,12 +446,18 @@
   record_modified_oops();
 }
 
-void Klass::klass_update_barrier_set_pre(void* p, oop v) {
-  // This barrier used by G1, where it's used remember the old oop values,
-  // so that we don't forget any objects that were live at the snapshot at
-  // the beginning. This function is only used when we write oops into
-  // Klasses. Since the Klasses are used as roots in G1, we don't have to
-  // do anything here.
+// This barrier is used by G1 to remember the old oop values, so
+// that we don't forget any objects that were live at the snapshot at
+// the beginning. This function is only used when we write oops into Klasses.
+void Klass::klass_update_barrier_set_pre(oop* p, oop v) {
+#if INCLUDE_ALL_GCS
+  if (UseG1GC) {
+    oop obj = *p;
+    if (obj != NULL) {
+      G1SATBCardTableModRefBS::enqueue(obj);
+    }
+  }
+#endif
 }
 
 void Klass::klass_oop_store(oop* p, oop v) {
@@ -456,7 +468,7 @@
   if (always_do_update_barrier) {
     klass_oop_store((volatile oop*)p, v);
   } else {
-    klass_update_barrier_set_pre((void*)p, v);
+    klass_update_barrier_set_pre(p, v);
     *p = v;
     klass_update_barrier_set(v);
   }
@@ -466,7 +478,7 @@
   assert(!Universe::heap()->is_in_reserved((void*)p), "Should store pointer into metadata");
   assert(v == NULL || Universe::heap()->is_in_reserved((void*)v), "Should store pointer to an object");
 
-  klass_update_barrier_set_pre((void*)p, v);
+  klass_update_barrier_set_pre((oop*)p, v); // Cast away volatile.
   OrderAccess::release_store_ptr(p, v);
   klass_update_barrier_set(v);
 }
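Note: klass_update_barrier_set_pre() above is now a real G1 pre-write barrier: before an oop field in a Klass is overwritten, the old referent is enqueued so snapshot-at-the-beginning marking still sees every object that was live when the cycle started. A standalone caricature of such a pre-barrier (oop is collapsed to void*, and satb_enqueue / ref_store are invented names; a real barrier also checks that concurrent marking is active and uses per-thread queues):

    #include <vector>

    typedef void* oop;                   // stand-in for HotSpot's oop

    // SATB buffer: old values overwritten during concurrent marking are
    // recorded so the marker still reaches everything that was live at the
    // start of the cycle.
    static std::vector<oop> satb_buffer;

    static void satb_enqueue(oop old_value) {
      if (old_value != nullptr) {        // NULL never needs remembering
        satb_buffer.push_back(old_value);
      }
    }

    // Pre-barriered reference store: remember the value being overwritten,
    // then perform the actual write.
    static void ref_store(oop* field, oop new_value) {
      satb_enqueue(*field);              // pre-write barrier
      *field = new_value;
    }

In the patch this hook only fires for oop stores into Klasses, which is why it lives behind klass_oop_store().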
--- a/hotspot/src/share/vm/oops/klass.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/oops/klass.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -553,7 +553,10 @@
   // The is_alive closure passed in depends on the Garbage Collector used.
   bool is_loader_alive(BoolObjectClosure* is_alive);
 
-  static void clean_weak_klass_links(BoolObjectClosure* is_alive);
+  static void clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses = true);
+  static void clean_subklass_tree(BoolObjectClosure* is_alive) {
+    clean_weak_klass_links(is_alive, false /* clean_alive_klasses */);
+  }
 
   // iterators
   virtual int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) = 0;
@@ -660,7 +663,7 @@
  private:
   // barriers used by klass_oop_store
   void klass_update_barrier_set(oop v);
-  void klass_update_barrier_set_pre(void* p, oop v);
+  void klass_update_barrier_set_pre(oop* p, oop v);
 };
 
 #endif // SHARE_VM_OOPS_KLASS_HPP
--- a/hotspot/src/share/vm/oops/klassVtable.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/oops/klassVtable.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -251,6 +251,17 @@
 // For bytecodes not produced by javac together it is possible that a method does not override
 // the superclass's method, but might indirectly override a super-super class's vtable entry
 // If none found, return a null superk, else return the superk of the method this does override
+// For public and protected methods: if they override a superclass, they will
+// also be overridden themselves appropriately.
+// Private methods do not override and are not overridden.
+// Package Private methods are trickier:
+// e.g. P1.A, pub m
+// P2.B extends A, package private m
+// P1.C extends B, public m
+// P1.C.m needs to override P1.A.m and can not override P2.B.m
+// Therefore: all package private methods need their own vtable entries for
+// them to be the root of an inheritance overriding decision
+// Package private methods may also override other vtable entries
 InstanceKlass* klassVtable::find_transitive_override(InstanceKlass* initialsuper, methodHandle target_method,
                             int vtable_index, Handle target_loader, Symbol* target_classname, Thread * THREAD) {
   InstanceKlass* superk = initialsuper;
@@ -398,8 +409,11 @@
                              target_classname, THREAD))
                              != (InstanceKlass*)NULL))))
         {
-        // overriding, so no new entry
-        allocate_new = false;
+        // Package private methods always need a new entry to root their own
+        // overriding. They may also override other methods.
+        if (!target_method()->is_package_private()) {
+          allocate_new = false;
+        }
 
         if (checkconstraints) {
         // Override vtable entry if passes loader constraint check
@@ -543,8 +557,9 @@
                                          AccessFlags class_flags,
                                          TRAPS) {
   if (class_flags.is_interface()) {
-    // Interfaces do not use vtables, so there is no point to assigning
-    // a vtable index to any of their methods.  If we refrain from doing this,
+    // Interfaces do not use vtables, except for java.lang.Object methods,
+    // so there is no point to assigning
+    // a vtable index to any of their local methods.  If we refrain from doing this,
     // we can use Method::_vtable_index to hold the itable index
     return false;
   }
@@ -582,6 +597,12 @@
     return true;
   }
 
+  // Package private methods always need a new entry to root their own
+  // overriding. This allows transitive overriding to work.
+  if (target_method()->is_package_private()) {
+    return true;
+  }
+
   // search through the super class hierarchy to see if we need
   // a new entry
   ResourceMark rm;
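Note: the rule spelled out in the comments above is that a package-private method always gets its own vtable entry so it can root its own overriding chain (the P1.A / P2.B / P1.C example), even when it also overrides a superclass method. A toy decision function capturing only that rule (MethodTraits and needs_new_vtable_slot are invented; the real needs_new_vtable_entry() and update_inherited_vtable() check much more, including access flags, the super chain and loader constraints):

    // Greatly simplified "does this method need a fresh vtable slot" check:
    // private methods do not dispatch virtually, package-private methods
    // always anchor their own slot, everything else reuses an overridden
    // slot when one exists.
    struct MethodTraits {
      bool is_private;
      bool is_package_private;
      bool overrides_accessible_super;   // found a super entry it may override
    };

    inline bool needs_new_vtable_slot(const MethodTraits& m) {
      if (m.is_private)         return false;  // no virtual dispatch
      if (m.is_package_private) return true;   // must root its own overriding
      return !m.overrides_accessible_super;    // otherwise reuse the super's slot
    }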
--- a/hotspot/src/share/vm/oops/oopsHierarchy.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/oops/oopsHierarchy.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -124,7 +124,7 @@
   operator oopDesc* () const volatile { return obj(); }
   operator intptr_t* () const         { return (intptr_t*)obj(); }
   operator PromotedObject* () const   { return (PromotedObject*)obj(); }
-  operator markOop () const           { return markOop(obj()); }
+  operator markOop () const volatile  { return markOop(obj()); }
   operator address   () const         { return (address)obj(); }
 
   // from javaCalls.cpp
--- a/hotspot/src/share/vm/opto/callGenerator.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -871,8 +871,11 @@
           Node*             receiver_node = kit.argument(0);
           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
-          target = C->optimize_virtual_call(caller, jvms->bci(), klass, target, receiver_type,
-                                            is_virtual,
+          // optimize_virtual_call() takes 2 different holder
+          // arguments for a corner case that doesn't apply here (see
+          // Parse::do_call())
+          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
+                                            target, receiver_type, is_virtual,
                                             call_does_dispatch, vtable_index);  // out-parameters
           // We lack profiling at this call but type speculation may
           // provide us with a type
--- a/hotspot/src/share/vm/opto/compile.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -29,6 +29,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/exceptionHandlerTable.hpp"
 #include "code/nmethod.hpp"
+#include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
 #include "compiler/disassembler.hpp"
 #include "compiler/oopMap.hpp"
@@ -555,6 +556,7 @@
     if (scratch_buffer_blob() == NULL) {
       // Let CompilerBroker disable further compilations.
       record_failure("Not enough space for scratch buffer in CodeCache");
+      CompileBroker::handle_full_code_cache();
       return;
     }
   }
--- a/hotspot/src/share/vm/opto/compile.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -852,8 +852,8 @@
 
   // Helper functions to identify inlining potential at call-site
   ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
-                                  ciMethod* callee, const TypeOopPtr* receiver_type,
-                                  bool is_virtual,
+                                  ciKlass* holder, ciMethod* callee,
+                                  const TypeOopPtr* receiver_type, bool is_virtual,
                                   bool &call_does_dispatch, int &vtable_index);
   ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                               ciMethod* callee, const TypeOopPtr* receiver_type);
--- a/hotspot/src/share/vm/opto/doCall.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -468,8 +468,14 @@
     Node* receiver_node             = stack(sp() - nargs);
     const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
     // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
-    callee = C->optimize_virtual_call(method(), bci(), klass, orig_callee, receiver_type,
-                                      is_virtual,
+    // For arrays, klass below is Object. When vtable calls are used,
+    // resolving the call with Object would allow an illegal call to
+    // finalize() on an array. We use holder instead: illegal calls to
+    // finalize() won't be compiled as vtable calls (IC call
+    // resolution will catch the illegal call) and the few legal calls
+    // on array types won't be either.
+    callee = C->optimize_virtual_call(method(), bci(), klass, holder, orig_callee,
+                                      receiver_type, is_virtual,
                                       call_does_dispatch, vtable_index);  // out-parameters
     speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
   }
@@ -940,8 +946,8 @@
 
 
 ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
-                                         ciMethod* callee, const TypeOopPtr* receiver_type,
-                                         bool is_virtual,
+                                         ciKlass* holder, ciMethod* callee,
+                                         const TypeOopPtr* receiver_type, bool is_virtual,
                                          bool& call_does_dispatch, int& vtable_index) {
   // Set default values for out-parameters.
   call_does_dispatch = true;
@@ -956,7 +962,7 @@
     call_does_dispatch = false;
   } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
     // We can make a vtable call at this site
-    vtable_index = callee->resolve_vtable_index(caller->holder(), klass);
+    vtable_index = callee->resolve_vtable_index(caller->holder(), holder);
   }
   return callee;
 }
@@ -979,8 +985,10 @@
   ciInstanceKlass* actual_receiver = klass;
   if (receiver_type != NULL) {
     // Array methods are all inherited from Object, and are monomorphic.
+    // finalize() call on array is not allowed.
     if (receiver_type->isa_aryptr() &&
-        callee->holder() == env()->Object_klass()) {
+        callee->holder() == env()->Object_klass() &&
+        callee->name() != ciSymbol::finalize_method_name()) {
       return callee;
     }
 
--- a/hotspot/src/share/vm/opto/output.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/opto/output.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1163,6 +1163,7 @@
   // Have we run out of code space?
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     C->record_failure("CodeCache is full");
+    CompileBroker::handle_full_code_cache();
     return NULL;
   }
   // Configure the code buffer.
@@ -1487,6 +1488,7 @@
       cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
       if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
         C->record_failure("CodeCache is full");
+        CompileBroker::handle_full_code_cache();
         return;
       }
 
@@ -1643,6 +1645,7 @@
   // One last check for failed CodeBuffer::expand:
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     C->record_failure("CodeCache is full");
+    CompileBroker::handle_full_code_cache();
     return;
   }
 
--- a/hotspot/src/share/vm/opto/parse.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/opto/parse.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -557,8 +557,9 @@
 
   float   dynamic_branch_prediction(float &cnt);
   float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci);
-  bool    seems_never_taken(float prob);
-  bool    seems_stable_comparison(BoolTest::mask btest, Node* c);
+  bool    seems_never_taken(float prob) const;
+  bool    path_is_suitable_for_uncommon_trap(float prob) const;
+  bool    seems_stable_comparison() const;
 
   void    do_ifnull(BoolTest::mask btest, Node* c);
   void    do_if(BoolTest::mask btest, Node* c);
--- a/hotspot/src/share/vm/opto/parse2.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/opto/parse2.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -886,7 +886,7 @@
 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
 // very small but nonzero probabilities, which if confused with zero
 // counts would keep the program recompiling indefinitely.
-bool Parse::seems_never_taken(float prob) {
+bool Parse::seems_never_taken(float prob) const {
   return prob < PROB_MIN;
 }
 
@@ -898,7 +898,7 @@
 // already acting in a stable fashion.  If the comparison
 // seems stable, we will put an expensive uncommon trap
 // on the untaken path.
-bool Parse::seems_stable_comparison(BoolTest::mask btest, Node* cmp) {
+bool Parse::seems_stable_comparison() const {
   if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) {
     return false;
   }
@@ -1127,6 +1127,14 @@
   }
 }
 
+bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
+  // Don't want to speculate on uncommon traps when running with -Xcomp
+  if (!UseInterpreter) {
+    return false;
+  }
+  return (seems_never_taken(prob) && seems_stable_comparison());
+}
+
 //----------------------------adjust_map_after_if------------------------------
 // Adjust the JVM state to reflect the result of taking this path.
 // Basically, it means inspecting the CmpNode controlling this
@@ -1140,7 +1148,7 @@
 
   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
 
-  if (seems_never_taken(prob) && seems_stable_comparison(btest, c)) {
+  if (path_is_suitable_for_uncommon_trap(prob)) {
     repush_if_args();
     uncommon_trap(Deoptimization::Reason_unstable_if,
                   Deoptimization::Action_reinterpret,
--- a/hotspot/src/share/vm/prims/jni.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -80,18 +80,6 @@
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #include "utilities/histogram.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 static jint CurrentVersion = JNI_VERSION_1_8;
 
@@ -3337,7 +3325,7 @@
     directBufferSupportInitializeEnded = 1;
   } else {
     while (!directBufferSupportInitializeEnded && !directBufferSupportInitializeFailed) {
-      os::yield();
+      os::naked_yield();
     }
   }
 
--- a/hotspot/src/share/vm/prims/jvm.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -53,7 +53,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/jfieldIDWorkaround.hpp"
 #include "runtime/orderAccess.inline.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/thread.inline.hpp"
@@ -3042,7 +3042,7 @@
   if (ConvertYieldToSleep) {
     os::sleep(thread, MinSleepInterval, false);
   } else {
-    os::yield();
+    os::naked_yield();
   }
 JVM_END
 
@@ -3072,7 +3072,7 @@
     // It appears that in certain GUI contexts, it may be beneficial to do a short sleep
     // for SOLARIS
     if (ConvertSleepToYield) {
-      os::yield();
+      os::naked_yield();
     } else {
       ThreadState old_state = thread->osthread()->get_state();
       thread->osthread()->set_state(SLEEPING);
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -47,6 +47,7 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/objectMonitor.hpp"
 #include "runtime/objectMonitor.inline.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vframe.hpp"
 #include "services/attachListener.hpp"
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -3019,7 +3019,7 @@
 
   // If there are any non-perm roots in the code cache, visit them.
   blk.set_kind(JVMTI_HEAP_REFERENCE_OTHER);
-  CodeBlobToOopClosure look_in_blobs(&blk, false);
+  CodeBlobToOopClosure look_in_blobs(&blk, !CodeBlobToOopClosure::FixRelocations);
   CodeCache::scavenge_root_nmethods_do(&look_in_blobs);
 
   return true;
--- a/hotspot/src/share/vm/prims/nativeLookup.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/prims/nativeLookup.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -41,21 +41,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "utilities/macros.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 
 static void mangle_name_on(outputStream* st, Symbol* name, int begin, int end) {
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 
+#include "memory/metadataFactory.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 
@@ -38,6 +39,7 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/os.hpp"
 
+#include "utilities/array.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/exceptions.hpp"
@@ -726,7 +728,6 @@
   return result;
 WB_END
 
-
 WB_ENTRY(jlong, WB_GetThreadStackSize(JNIEnv* env, jobject o))
   return (jlong) Thread::current()->stack_size();
 WB_END
@@ -736,6 +737,35 @@
   return (jlong) t->stack_available(os::current_stack_pointer()) - (jlong) StackShadowPages * os::vm_page_size();
 WB_END
 
+int WhiteBox::array_bytes_to_length(size_t bytes) {
+  return Array<u1>::bytes_to_length(bytes);
+}
+
+WB_ENTRY(jlong, WB_AllocateMetaspace(JNIEnv* env, jobject wb, jobject class_loader, jlong size))
+  if (size < 0) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+        err_msg("WB_AllocateMetaspace: size is negative: " JLONG_FORMAT, size));
+  }
+
+  oop class_loader_oop = JNIHandles::resolve(class_loader);
+  ClassLoaderData* cld = class_loader_oop != NULL
+      ? java_lang_ClassLoader::loader_data(class_loader_oop)
+      : ClassLoaderData::the_null_class_loader_data();
+
+  void* metadata = MetadataFactory::new_writeable_array<u1>(cld, WhiteBox::array_bytes_to_length((size_t)size), thread);
+
+  return (jlong)(uintptr_t)metadata;
+WB_END
+
+WB_ENTRY(void, WB_FreeMetaspace(JNIEnv* env, jobject wb, jobject class_loader, jlong addr, jlong size))
+  oop class_loader_oop = JNIHandles::resolve(class_loader);
+  ClassLoaderData* cld = class_loader_oop != NULL
+      ? java_lang_ClassLoader::loader_data(class_loader_oop)
+      : ClassLoaderData::the_null_class_loader_data();
+
+  MetadataFactory::free_array(cld, (Array<u1>*)(uintptr_t)addr);
+WB_END
+
 //Some convenience methods to deal with objects from java
 int WhiteBox::offset_for_field(const char* field_name, oop object,
     Symbol* signature_symbol) {
@@ -866,6 +896,10 @@
   {CC"isInStringTable",    CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable  },
   {CC"fullGC",   CC"()V",                             (void*)&WB_FullGC },
   {CC"readReservedMemory", CC"()V",                   (void*)&WB_ReadReservedMemory },
+  {CC"allocateMetaspace",
+     CC"(Ljava/lang/ClassLoader;J)J",                 (void*)&WB_AllocateMetaspace },
+  {CC"freeMetaspace",
+     CC"(Ljava/lang/ClassLoader;JJ)V",                (void*)&WB_FreeMetaspace },
   {CC"getCPUFeatures",     CC"()Ljava/lang/String;",  (void*)&WB_GetCPUFeatures     },
   {CC"getNMethod",         CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
                                                       (void*)&WB_GetNMethod         },
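
Note: the two new entry points are intended to be reached through the sun.hotspot.WhiteBox test wrapper. The sketch below is hypothetical usage, not part of the change; it assumes the Java side declares allocateMetaspace(ClassLoader, long) and freeMetaspace(ClassLoader, long, long) to match the JNI signatures registered above, and that the VM runs with -Xbootclasspath/a:<wb classes> -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI.

    import sun.hotspot.WhiteBox;

    public class MetaspaceWbDemo {
        public static void main(String[] args) {
            WhiteBox wb = WhiteBox.getWhiteBox();
            ClassLoader cl = MetaspaceWbDemo.class.getClassLoader();
            long size = 64 * 1024;                       // request roughly 64 KB of metaspace
            long addr = wb.allocateMetaspace(cl, size);  // backed by MetadataFactory::new_writeable_array<u1>
            try {
                System.out.println("metaspace block at 0x" + Long.toHexString(addr));
            } finally {
                wb.freeMetaspace(cl, addr, size);        // returns the Array<u1> to the loader's metaspace
            }
        }
    }
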
--- a/hotspot/src/share/vm/prims/whitebox.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/prims/whitebox.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -62,6 +62,8 @@
     Symbol* signature_symbol);
   static const char* lookup_jstring(const char* field_name, oop object);
   static bool lookup_bool(const char* field_name, oop object);
+
+  static int array_bytes_to_length(size_t bytes);
 };
 
 
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -37,26 +37,12 @@
 #include "runtime/arguments.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
+#include "runtime/os.hpp"
 #include "services/management.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/taskqueue.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_RUNTIME_ARGUMENTS_HPP
 
 #include "runtime/java.hpp"
+#include "runtime/os.hpp"
 #include "runtime/perfData.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/top.hpp"
--- a/hotspot/src/share/vm/runtime/atomic.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/atomic.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,23 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "runtime/atomic.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
-
 #include "runtime/atomic.inline.hpp"
 
 jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
--- a/hotspot/src/share/vm/runtime/frame.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -40,6 +40,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/monitorChunk.hpp"
+#include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubCodeGenerator.hpp"
--- a/hotspot/src/share/vm/runtime/globals.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1130,29 +1130,30 @@
           "Use LWP-based instead of libthread-based synchronization "       \
           "(SPARC only)")                                                   \
                                                                             \
-  product(ccstr, SyncKnobs, NULL,                                           \
-          "(Unstable) Various monitor synchronization tunables")            \
-                                                                            \
-  product(intx, EmitSync, 0,                                                \
-          "(Unsafe, Unstable) "                                             \
-          "Control emission of inline sync fast-path code")                 \
+  experimental(ccstr, SyncKnobs, NULL,                                      \
+               "(Unstable) Various monitor synchronization tunables")       \
+                                                                            \
+  experimental(intx, EmitSync, 0,                                           \
+               "(Unsafe, Unstable) "                                        \
+               "Control emission of inline sync fast-path code")            \
                                                                             \
   product(intx, MonitorBound, 0, "Bound Monitor population")                \
                                                                             \
   product(bool, MonitorInUseLists, false, "Track Monitors for Deflation")   \
                                                                             \
-  product(intx, SyncFlags, 0, "(Unsafe, Unstable) Experimental Sync flags") \
-                                                                            \
-  product(intx, SyncVerbose, 0, "(Unstable)")                               \
-                                                                            \
-  product(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)")                    \
-                                                                            \
-  product(intx, hashCode, 5,                                                \
-          "(Unstable) select hashCode generation algorithm")                \
-                                                                            \
-  product(intx, WorkAroundNPTLTimedWaitHang, 1,                             \
-          "(Unstable, Linux-specific) "                                     \
-          "avoid NPTL-FUTEX hang pthread_cond_timedwait")                   \
+  experimental(intx, SyncFlags, 0, "(Unsafe, Unstable) "                    \
+               "Experimental Sync flags")                                   \
+                                                                            \
+  experimental(intx, SyncVerbose, 0, "(Unstable)")                          \
+                                                                            \
+  experimental(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)")               \
+                                                                            \
+  experimental(intx, hashCode, 5,                                           \
+               "(Unstable) select hashCode generation algorithm")           \
+                                                                            \
+  experimental(intx, WorkAroundNPTLTimedWaitHang, 1,                        \
+               "(Unstable, Linux-specific) "                                \
+               "avoid NPTL-FUTEX hang pthread_cond_timedwait")              \
                                                                             \
   product(bool, FilterSpuriousWakeups, true,                                \
           "When true prevents OS-level spurious, or premature, wakeups "    \
--- a/hotspot/src/share/vm/runtime/handles.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/handles.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -29,18 +29,6 @@
 #include "runtime/atomic.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/thread.inline.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
--- a/hotspot/src/share/vm/runtime/icache.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/icache.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -34,6 +34,9 @@
   ResourceMark rm;
 
   BufferBlob* b = BufferBlob::create("flush_icache_stub", ICache::stub_size);
+  if (b == NULL) {
+    vm_exit_out_of_memory(ICache::stub_size, OOM_MALLOC_ERROR, "CodeCache: no space for flush_icache_stub");
+  }
   CodeBuffer c(b);
 
   ICacheStubGenerator g(&c);
--- a/hotspot/src/share/vm/runtime/interfaceSupport.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/interfaceSupport.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -32,6 +32,7 @@
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/threadLocalStorage.hpp"
 #include "runtime/vframe.hpp"
 #include "utilities/preserveException.hpp"
--- a/hotspot/src/share/vm/runtime/javaCalls.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/javaCalls.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
--- a/hotspot/src/share/vm/runtime/mutex.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/mutex.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,4 +1,3 @@
-
 /*
  * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -488,7 +487,6 @@
   for (;;) {
     assert(_OnDeck == ESelf, "invariant");
     if (TrySpin(Self)) break;
-    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
     // It's probably wise to spin only if we *actually* blocked
     // CONSIDER: check the lockbyte, if it remains set then
     // preemptively drain the cxq into the EntryList.
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadLocalStorage.hpp"
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -27,21 +27,6 @@
 
 #include "memory/allocation.hpp"
 #include "runtime/mutex.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 // Mutexes used in the VM.
 
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -43,24 +43,12 @@
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/preserveException.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
-  #define ATTR __attribute__((noinline))
+  #define NOINLINE __attribute__((noinline))
 #else
-  #define ATTR
+  #define NOINLINE
 #endif
 
 
@@ -115,38 +103,39 @@
 // The knob* variables are effectively final.  Once set they should
 // never be modified hence.  Consider using __read_mostly with GCC.
 
-int ObjectMonitor::Knob_Verbose    = 0;
-int ObjectMonitor::Knob_SpinLimit  = 5000;    // derived by an external tool -
-static int Knob_LogSpins           = 0;       // enable jvmstat tally for spins
-static int Knob_HandOff            = 0;
-static int Knob_ReportSettings     = 0;
+int ObjectMonitor::Knob_Verbose     = 0;
+int ObjectMonitor::Knob_VerifyInUse = 0;
+int ObjectMonitor::Knob_SpinLimit   = 5000;    // derived by an external tool -
+static int Knob_LogSpins            = 0;       // enable jvmstat tally for spins
+static int Knob_HandOff             = 0;
+static int Knob_ReportSettings      = 0;
 
-static int Knob_SpinBase           = 0;       // Floor AKA SpinMin
-static int Knob_SpinBackOff        = 0;       // spin-loop backoff
-static int Knob_CASPenalty         = -1;      // Penalty for failed CAS
-static int Knob_OXPenalty          = -1;      // Penalty for observed _owner change
-static int Knob_SpinSetSucc        = 1;       // spinners set the _succ field
-static int Knob_SpinEarly          = 1;
-static int Knob_SuccEnabled        = 1;       // futile wake throttling
-static int Knob_SuccRestrict       = 0;       // Limit successors + spinners to at-most-one
-static int Knob_MaxSpinners        = -1;      // Should be a function of # CPUs
-static int Knob_Bonus              = 100;     // spin success bonus
-static int Knob_BonusB             = 100;     // spin success bonus
-static int Knob_Penalty            = 200;     // spin failure penalty
-static int Knob_Poverty            = 1000;
-static int Knob_SpinAfterFutile    = 1;       // Spin after returning from park()
-static int Knob_FixedSpin          = 0;
-static int Knob_OState             = 3;       // Spinner checks thread state of _owner
-static int Knob_UsePause           = 1;
-static int Knob_ExitPolicy         = 0;
-static int Knob_PreSpin            = 10;      // 20-100 likely better
-static int Knob_ResetEvent         = 0;
-static int BackOffMask             = 0;
+static int Knob_SpinBase            = 0;       // Floor AKA SpinMin
+static int Knob_SpinBackOff         = 0;       // spin-loop backoff
+static int Knob_CASPenalty          = -1;      // Penalty for failed CAS
+static int Knob_OXPenalty           = -1;      // Penalty for observed _owner change
+static int Knob_SpinSetSucc         = 1;       // spinners set the _succ field
+static int Knob_SpinEarly           = 1;
+static int Knob_SuccEnabled         = 1;       // futile wake throttling
+static int Knob_SuccRestrict        = 0;       // Limit successors + spinners to at-most-one
+static int Knob_MaxSpinners         = -1;      // Should be a function of # CPUs
+static int Knob_Bonus               = 100;     // spin success bonus
+static int Knob_BonusB              = 100;     // spin success bonus
+static int Knob_Penalty             = 200;     // spin failure penalty
+static int Knob_Poverty             = 1000;
+static int Knob_SpinAfterFutile     = 1;       // Spin after returning from park()
+static int Knob_FixedSpin           = 0;
+static int Knob_OState              = 3;       // Spinner checks thread state of _owner
+static int Knob_UsePause            = 1;
+static int Knob_ExitPolicy          = 0;
+static int Knob_PreSpin             = 10;      // 20-100 likely better
+static int Knob_ResetEvent          = 0;
+static int BackOffMask              = 0;
 
-static int Knob_FastHSSEC          = 0;
-static int Knob_MoveNotifyee       = 2;       // notify() - disposition of notifyee
-static int Knob_QMode              = 0;       // EntryList-cxq policy - queue discipline
-static volatile int InitDone       = 0;
+static int Knob_FastHSSEC           = 0;
+static int Knob_MoveNotifyee        = 2;       // notify() - disposition of notifyee
+static int Knob_QMode               = 0;       // EntryList-cxq policy - queue discipline
+static volatile int InitDone        = 0;
 
 #define TrySpin TrySpin_VaryDuration
 
@@ -211,7 +200,7 @@
 //   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
 //   locks and monitor metadata.
 //
-//   Cxq points to the the set of Recently Arrived Threads attempting entry.
+//   Cxq points to the set of Recently Arrived Threads attempting entry.
 //   Because we push threads onto _cxq with CAS, the RATs must take the form of
 //   a singly-linked LIFO.  We drain _cxq into EntryList  at unlock-time when
 //   the unlocking thread notices that EntryList is null but _cxq is != null.
@@ -281,13 +270,12 @@
   }
 }
 
-void ATTR ObjectMonitor::enter(TRAPS) {
+void NOINLINE ObjectMonitor::enter(TRAPS) {
   // The following code is ordered to check the most common cases first
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
-  void * cur;
 
-  cur = Atomic::cmpxchg_ptr(Self, &_owner, NULL);
+  void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
   if (cur == NULL) {
      // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
      assert(_recursions == 0   , "invariant");
@@ -447,26 +435,24 @@
 // Callers must compensate as needed.
 
 int ObjectMonitor::TryLock (Thread * Self) {
-   for (;;) {
-      void * own = _owner;
-      if (own != NULL) return 0;
-      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
-         // Either guarantee _recursions == 0 or set _recursions = 0.
-         assert(_recursions == 0, "invariant");
-         assert(_owner == Self, "invariant");
-         // CONSIDER: set or assert that OwnerIsThread == 1
-         return 1;
-      }
-      // The lock had been free momentarily, but we lost the race to the lock.
-      // Interference -- the CAS failed.
-      // We can either return -1 or retry.
-      // Retry doesn't make as much sense because the lock was just acquired.
-      if (true) return -1;
-   }
+  void * own = _owner;
+  if (own != NULL) return 0;
+  if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+    // Either guarantee _recursions == 0 or set _recursions = 0.
+    assert(_recursions == 0, "invariant");
+    assert(_owner == Self, "invariant");
+    // CONSIDER: set or assert that OwnerIsThread == 1
+    return 1;
+  }
+  // The lock had been free momentarily, but we lost the race to the lock.
+  // Interference -- the CAS failed.
+  // We can either return -1 or retry.
+  // Retry doesn't make as much sense because the lock was just acquired.
+  return -1;
 }
 
-void ATTR ObjectMonitor::EnterI (TRAPS) {
-    Thread * Self = THREAD;
+void NOINLINE ObjectMonitor::EnterI (TRAPS) {
+    Thread * const Self = THREAD;
     assert(Self->is_Java_thread(), "invariant");
     assert(((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant");
 
@@ -562,7 +548,7 @@
         Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
     }
 
-    // The lock have been released while this thread was occupied queueing
+    // The lock might have been released while this thread was occupied queueing
     // itself onto _cxq.  To close the race and avoid "stranding" and
     // progress-liveness failure we must resample-retry _owner before parking.
     // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
@@ -714,7 +700,7 @@
 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
 // loop accordingly.
 
-void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
+void NOINLINE ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
     assert(Self != NULL                , "invariant");
     assert(SelfNode != NULL            , "invariant");
     assert(SelfNode->_thread == Self   , "invariant");
@@ -802,6 +788,7 @@
     OrderAccess::fence();      // see comments at the end of EnterI()
 }
 
+// By convention we unlink a contending thread from EntryList|cxq immediately
 // after the thread acquires the lock in ::enter().  Equally, we could defer
 // unlinking the thread until ::exit()-time.
 
@@ -822,7 +809,7 @@
         assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
         TEVENT(Unlink from EntryList);
     } else {
-        guarantee(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
+        assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
         // Inopportune interleaving -- Self is still on the cxq.
         // This usually means the enqueue of self raced an exiting thread.
         // Normally we'll find Self near the front of the cxq, so
@@ -862,10 +849,12 @@
         TEVENT(Unlink from cxq);
     }
 
+#ifdef ASSERT
     // Diagnostic hygiene ...
     SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
     SelfNode->_next  = (ObjectWaiter *) 0xBAD;
     SelfNode->TState = ObjectWaiter::TS_RUN;
+#endif
 }
 
 // -----------------------------------------------------------------------------
@@ -918,9 +907,15 @@
 // the integral of the # of active timers at any instant over time).
 // Both impinge on OS scalability.  Given that, at most one thread parked on
 // a monitor will use a timer.
+//
+// There is also the risk of a futile wake-up. If we drop the lock
+// another thread can reacquire the lock immediately, and we can
+// then wake a thread unnecessarily. This is benign, and we've
+// structured the code so the windows are short and the frequency
+// of such futile wakeups is low.
 
-void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
-   Thread * Self = THREAD;
+void NOINLINE ObjectMonitor::exit(bool not_suspended, TRAPS) {
+   Thread * const Self = THREAD;
    if (THREAD != _owner) {
      if (THREAD->is_lock_owned((address) _owner)) {
        // Transmute _owner from a BasicLock pointer to a Thread address.
@@ -932,14 +927,17 @@
        _recursions = 0;
        OwnerIsThread = 1;
      } else {
-       // NOTE: we need to handle unbalanced monitor enter/exit
-       // in native code by throwing an exception.
-       // TODO: Throw an IllegalMonitorStateException ?
+       // Apparent unbalanced locking ...
+       // Naively we'd like to throw IllegalMonitorStateException.
+       // As a practical matter we can neither allocate nor throw an
+       // exception as ::exit() can be called from leaf routines.
+       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
+       // Upon deeper reflection, however, in a properly run JVM the only
+       // way we should encounter this situation is in the presence of
+       // unbalanced JNI locking. TODO: CheckJNICalls.
+       // See also: CR4414101
        TEVENT(Exit - Throw IMSX);
-       assert(false, "Non-balanced monitor enter/exit!");
-       if (false) {
-          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
-       }
+       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
        return;
      }
    }
@@ -988,6 +986,7 @@
             return;
          }
          TEVENT(Inflated exit - complex egress);
+         // Other threads are blocked trying to acquire the lock.
 
          // Normally the exiting thread is responsible for ensuring succession,
          // but if other successors are ready or other entering threads are spinning
@@ -1154,9 +1153,9 @@
       if (w != NULL) {
           // I'd like to write: guarantee (w->_thread != Self).
           // But in practice an exiting thread may find itself on the EntryList.
-          // Lets say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
+          // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
           // then calls exit().  Exit release the lock by setting O._owner to NULL.
-          // Lets say T1 then stalls.  T2 acquires O and calls O.notify().  The
+          // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
           // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
           // release the lock "O".  T2 resumes immediately after the ST of null into
           // _owner, above.  T2 notices that the EntryList is populated, so it
@@ -1273,10 +1272,13 @@
 //       MEMBAR
 //       LD Self_>_suspend_flags
 //
+// UPDATE 2007-10-6: since I've replaced the native Mutex/Monitor subsystem
+// with a more efficient implementation, the need to use "FastHSSEC" has
+// decreased. - Dave
 
 
 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
-   int Mode = Knob_FastHSSEC;
+   const int Mode = Knob_FastHSSEC;
    if (Mode && !jSelf->is_external_suspend()) {
       assert(jSelf->is_suspend_equivalent(), "invariant");
       jSelf->clear_suspend_equivalent();
@@ -1425,7 +1427,7 @@
 // Wait/Notify/NotifyAll
 //
 // Note: a subset of changes to ObjectMonitor::wait()
-// will need to be replicated in complete_exit above
+// will need to be replicated in complete_exit
 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
    Thread * const Self = THREAD;
    assert(Self->is_Java_thread(), "Must be Java thread!");
@@ -2280,12 +2282,12 @@
   assert(_event != NULL, "invariant");
 }
 
-void ObjectWaiter::wait_reenter_begin(ObjectMonitor *mon) {
+void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) {
   JavaThread *jt = (JavaThread *)this->_thread;
   _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
 }
 
-void ObjectWaiter::wait_reenter_end(ObjectMonitor *mon) {
+void ObjectWaiter::wait_reenter_end(ObjectMonitor * const mon) {
   JavaThread *jt = (JavaThread *)this->_thread;
   JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
 }
@@ -2467,6 +2469,7 @@
   #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
   SETKNOB(ReportSettings);
   SETKNOB(Verbose);
+  SETKNOB(VerifyInUse);
   SETKNOB(FixedSpin);
   SETKNOB(SpinLimit);
   SETKNOB(SpinBase);
--- a/hotspot/src/share/vm/runtime/objectMonitor.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/objectMonitor.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -311,6 +311,7 @@
 
  public:
   static int Knob_Verbose;
+  static int Knob_VerifyInUse;
   static int Knob_SpinLimit;
   void* operator new (size_t size) throw() {
     return AllocateHeap(size, mtInternal);
--- a/hotspot/src/share/vm/runtime/os.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/os.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -46,7 +46,7 @@
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "services/attachListener.hpp"
@@ -54,18 +54,6 @@
 #include "services/threadService.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 # include <signal.h>
 
--- a/hotspot/src/share/vm/runtime/os.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/os.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -442,16 +442,7 @@
   // ms = 0, will sleep for the least amount of time allowed by the OS.
   static void naked_short_sleep(jlong ms);
   static void infinite_sleep(); // never returns, use with CAUTION
-  static void yield();        // Yields to all threads with same priority
-  enum YieldResult {
-    YIELD_SWITCHED = 1,         // caller descheduled, other ready threads exist & ran
-    YIELD_NONEREADY = 0,        // No other runnable/ready threads.
-                                // platform-specific yield return immediately
-    YIELD_UNKNOWN = -1          // Unknown: platform doesn't support _SWITCHED or _NONEREADY
-    // YIELD_SWITCHED and YIELD_NONREADY imply the platform supports a "strong"
-    // yield that can be used in lieu of blocking.
-  } ;
-  static YieldResult NakedYield () ;
+  static void naked_yield () ;
   static OSReturn set_priority(Thread* thread, ThreadPriority priority);
   static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);
 
@@ -478,9 +469,6 @@
   // run cmd in a separate process and return its exit code; or -1 on failures
   static int fork_and_exec(char *cmd);
 
-  // Set file to send error reports.
-  static void set_error_file(const char *logfile);
-
   // os::exit() is merged with vm_exit()
   // static void exit(int num);
 
@@ -629,11 +617,6 @@
   static void     print_jni_name_prefix_on(outputStream* st, int args_size);
   static void     print_jni_name_suffix_on(outputStream* st, int args_size);
 
-  // File conventions
-  static const char* file_separator();
-  static const char* line_separator();
-  static const char* path_separator();
-
   // Init os specific system properties values
   static void init_system_properties_values();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/os.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_OS_INLINE_HPP
+#define SHARE_VM_RUNTIME_OS_INLINE_HPP
+
+#include "runtime/os.hpp"
+
+#ifdef TARGET_OS_FAMILY_linux
+# include "os_linux.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_solaris
+# include "os_solaris.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_windows
+# include "os_windows.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_bsd
+# include "os_bsd.inline.hpp"
+#endif
+
+#endif // SHARE_VM_RUNTIME_OS_INLINE_HPP
--- a/hotspot/src/share/vm/runtime/safepoint.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -322,7 +322,7 @@
         SpinPause() ;     // MP-Polite spin
       } else
       if (steps < DeferThrSuspendLoopCount) {
-        os::NakedYield() ;
+        os::naked_yield() ;
       } else {
         os::naked_short_sleep(1);
       }
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1810,14 +1810,8 @@
 
 
 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
-#ifndef PRODUCT
-int SharedRuntime::_monitor_enter_ctr=0;
-#endif
 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
   oop obj(_obj);
-#ifndef PRODUCT
-  _monitor_enter_ctr++;             // monitor enter slow
-#endif
   if (PrintBiasedLockingStatistics) {
     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
   }
@@ -1831,15 +1825,9 @@
   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
 JRT_END
 
-#ifndef PRODUCT
-int SharedRuntime::_monitor_exit_ctr=0;
-#endif
 // Handles the uncommon cases of monitor unlocking in compiled code
 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
    oop obj(_obj);
-#ifndef PRODUCT
-  _monitor_exit_ctr++;              // monitor exit slow
-#endif
   Thread* THREAD = JavaThread::current();
   // I'm not convinced we need the code contained by MIGHT_HAVE_PENDING anymore
   // testing was unable to ever fire the assert that guarded it so I have removed it.
@@ -1879,8 +1867,6 @@
   ttyLocker ttyl;
   if (xtty != NULL)  xtty->head("statistics type='SharedRuntime'");
 
-  if (_monitor_enter_ctr) tty->print_cr("%5d monitor enter slow",  _monitor_enter_ctr);
-  if (_monitor_exit_ctr) tty->print_cr("%5d monitor exit slow",   _monitor_exit_ctr);
   if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);
 
   SharedRuntime::print_ic_miss_histogram();
@@ -2464,9 +2450,9 @@
     if (PrintAdapterHandlers || PrintStubCode) {
       ttyLocker ttyl;
       entry->print_adapter_on(tty);
-      tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
+      tty->print_cr("i2c argument handler #%d for: %s %s %s (%d bytes generated)",
                     _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
-                    method->signature()->as_C_string(), insts_size);
+                    method->signature()->as_C_string(), fingerprint->as_string(), insts_size);
       tty->print_cr("c2i argument handler starts at %p", entry->get_c2i_entry());
       if (Verbose || PrintStubCode) {
         address first_pc = entry->base_address();
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -516,8 +516,6 @@
   static void trace_ic_miss(address at);
 
  public:
-  static int _monitor_enter_ctr;                 // monitor enter slow
-  static int _monitor_exit_ctr;                  // monitor exit slow
   static int _throw_null_ctr;                    // throwing a null-pointer exception
   static int _ic_miss_ctr;                       // total # of IC misses
   static int _wrong_method_ctr;
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -41,24 +41,12 @@
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #include "utilities/preserveException.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 #if defined(__GNUC__) && !defined(PPC64)
   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
-  #define ATTR __attribute__((noinline))
+  #define NOINLINE __attribute__((noinline))
 #else
-  #define ATTR
+  #define NOINLINE
 #endif
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
@@ -218,14 +206,6 @@
     return;
   }
 
-#if 0
-  // The following optimization isn't particularly useful.
-  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
-    lock->set_displaced_header(NULL);
-    return;
-  }
-#endif
-
   // The object header will never be displaced to this lock,
   // so it does not matter what the value is, except that it
   // must be non-zero to avoid looking like a re-entrant lock,
@@ -469,7 +449,7 @@
     ++its;
     if (its > 10000 || !os::is_MP()) {
        if (its & 1) {
-         os::NakedYield();
+         os::naked_yield();
          TEVENT(Inflate: INFLATING - yield);
        } else {
          // Note that the following code attenuates the livelock problem but is not
@@ -499,7 +479,7 @@
            if ((YieldThenBlock++) >= 16) {
               Thread::current()->_ParkEvent->park(1);
            } else {
-              os::NakedYield();
+              os::naked_yield();
            }
          }
          Thread::muxRelease(InflationLocks + ix);
@@ -585,7 +565,7 @@
     // added check of the bias pattern is to avoid useless calls to
     // thread-local storage.
     if (obj->mark()->has_bias_pattern()) {
-      // Box and unbox the raw reference just in case we cause a STW safepoint.
+      // Handle for oop obj in case of STW safepoint
       Handle hobj(Self, obj);
       // Relaxing assertion for bug 6320749.
       assert(Universe::verify_in_progress() ||
@@ -902,23 +882,23 @@
     }
   }
 }
-/* Too slow for general assert or debug
+
 void ObjectSynchronizer::verifyInUse (Thread *Self) {
    ObjectMonitor* mid;
    int inusetally = 0;
    for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
-     inusetally ++;
+     inusetally++;
    }
    assert(inusetally == Self->omInUseCount, "inuse count off");
 
    int freetally = 0;
    for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
-     freetally ++;
+     freetally++;
    }
    assert(freetally == Self->omFreeCount, "free count off");
 }
-*/
-ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
+
+ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc (Thread * Self) {
     // A large MAXPRIVATE value reduces both list lock contention
     // and list coherency traffic, but also tends to increase the
     // number of objectMonitors in circulation as well as the STW
@@ -944,7 +924,9 @@
              m->FreeNext = Self->omInUseList;
              Self->omInUseList = m;
              Self->omInUseCount++;
-             // verifyInUse(Self);
+             if (ObjectMonitor::Knob_VerifyInUse) {
+               verifyInUse(Self);
+             }
            } else {
              m->FreeNext = NULL;
            }
@@ -1064,7 +1046,9 @@
            curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
          }
          Self->omInUseCount--;
-         // verifyInUse(Self);
+         if (ObjectMonitor::Knob_VerifyInUse) {
+           verifyInUse(Self);
+         }
          break;
        } else {
          curmidinuse = mid;
@@ -1073,7 +1057,7 @@
     }
   }
 
-  // FreeNext is used for both onInUseList and omFreeList, so clear old before setting new
+  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
   m->FreeNext = Self->omFreeList;
   Self->omFreeList = m;
   Self->omFreeCount++;
@@ -1086,7 +1070,7 @@
 // consecutive STW safepoints.  Relatedly, we might decay
 // omFreeProvision at STW safepoints.
 //
-// Also return the monitors of a moribund thread"s omInUseList to
+// Also return the monitors of a moribund thread's omInUseList to
 // a global gOmInUseList under the global list lock so these
 // will continue to be scanned.
 //
@@ -1127,7 +1111,6 @@
         InUseTail = curom;
         InUseTally++;
       }
-// TODO debug
       assert(Self->omInUseCount == InUseTally, "inuse count off");
       Self->omInUseCount = 0;
       guarantee(InUseTail != NULL && InUseList != NULL, "invariant");
@@ -1166,7 +1149,7 @@
 // multiple locks occupy the same $ line.  Padding might be appropriate.
 
 
-ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
+ObjectMonitor * NOINLINE ObjectSynchronizer::inflate (Thread * Self, oop object) {
   // Inflate mutates the heap ...
   // Relaxing assertion for bug 6320749.
   assert(Universe::verify_in_progress() ||
@@ -1397,7 +1380,7 @@
 // Deflate a single monitor if not in use
 // Return true if deflated, false if in use
 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
-                                         ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
+                                         ObjectMonitor** freeHeadp, ObjectMonitor** freeTailp) {
   bool deflated;
   // Normal case ... The monitor is associated with obj.
   guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
@@ -1427,13 +1410,13 @@
      assert(mid->object() == NULL, "invariant");
 
      // Move the object to the working free list defined by FreeHead,FreeTail.
-     if (*FreeHeadp == NULL) *FreeHeadp = mid;
-     if (*FreeTailp != NULL) {
-       ObjectMonitor * prevtail = *FreeTailp;
+     if (*freeHeadp == NULL) *freeHeadp = mid;
+     if (*freeTailp != NULL) {
+       ObjectMonitor * prevtail = *freeTailp;
        assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
        prevtail->FreeNext = mid;
       }
-     *FreeTailp = mid;
+     *freeTailp = mid;
      deflated = true;
   }
   return deflated;
@@ -1441,7 +1424,7 @@
 
 // Caller acquires ListLock
 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
-                                          ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
+                                          ObjectMonitor** freeHeadp, ObjectMonitor** freeTailp) {
   ObjectMonitor* mid;
   ObjectMonitor* next;
   ObjectMonitor* curmidinuse = NULL;
@@ -1451,7 +1434,7 @@
      oop obj = (oop) mid->object();
      bool deflated = false;
      if (obj != NULL) {
-       deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
+       deflated = deflate_monitor(mid, obj, freeHeadp, freeTailp);
      }
      if (deflated) {
        // extract from per-thread in-use-list
@@ -1494,7 +1477,9 @@
       nInCirculation+= cur->omInUseCount;
       int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
       cur->omInUseCount-= deflatedcount;
-      // verifyInUse(cur);
+      if (ObjectMonitor::Knob_VerifyInUse) {
+        verifyInUse(cur);
+      }
       nScavenged += deflatedcount;
       nInuse += cur->omInUseCount;
      }
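
    The synchronizer.cpp hunks above replace the previously commented-out verifyInUse() calls with
    calls gated by ObjectMonitor::Knob_VerifyInUse, so the O(n) list walk is only paid for when it is
    explicitly enabled. Below is a minimal stand-alone sketch of that gating pattern; the Monitor and
    ThreadLists types and the knob variable are illustrative placeholders, not the HotSpot definitions.

    #include <cassert>
    #include <cstddef>

    // Placeholder for ObjectMonitor: only the link field matters for this sketch.
    struct Monitor {
      Monitor* FreeNext;   // shared link used by both omFreeList and omInUseList
    };

    // Placeholder for the per-thread monitor bookkeeping.
    struct ThreadLists {
      Monitor* omInUseList;
      int      omInUseCount;
    };

    // Stand-in for ObjectMonitor::Knob_VerifyInUse: off by default because the
    // walk below is too slow to run on every list mutation.
    static int Knob_VerifyInUse = 0;

    // O(n) consistency check: the recorded count must match the list length.
    static void verifyInUse(const ThreadLists* self) {
      int tally = 0;
      for (const Monitor* m = self->omInUseList; m != NULL; m = m->FreeNext) {
        tally++;
      }
      assert(tally == self->omInUseCount && "in-use count off");
    }

    // Pattern used after each in-use list mutation in the hunks above.
    static void after_list_mutation(const ThreadLists* self) {
      if (Knob_VerifyInUse) {
        verifyInUse(self);
      }
    }

    int main() {
      Monitor a = { NULL };
      Monitor b = { &a };
      ThreadLists t = { &b, 2 };
      Knob_VerifyInUse = 1;        // opt in to the expensive check
      after_list_mutation(&t);     // passes: two monitors on the list, count == 2
      return 0;
    }
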
--- a/hotspot/src/share/vm/runtime/synchronizer.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/synchronizer.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -84,7 +84,7 @@
   static void reenter            (Handle obj, intptr_t recursion, TRAPS);
 
   // thread-specific and global objectMonitor free list accessors
-//  static void verifyInUse (Thread * Self) ; too slow for general assert/debug
+  static void verifyInUse(Thread * Self);
   static ObjectMonitor * omAlloc(Thread * Self);
   static void omRelease(Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc);
   static void omFlush(Thread * Self);
@@ -114,10 +114,10 @@
   // An adaptive profile-based deflation policy could be used if needed
   static void deflate_idle_monitors();
   static int walk_monitor_list(ObjectMonitor** listheadp,
-                               ObjectMonitor** FreeHeadp,
-                               ObjectMonitor** FreeTailp);
-  static bool deflate_monitor(ObjectMonitor* mid, oop obj, ObjectMonitor** FreeHeadp,
-                              ObjectMonitor** FreeTailp);
+                               ObjectMonitor** freeHeadp,
+                               ObjectMonitor** freeTailp);
+  static bool deflate_monitor(ObjectMonitor* mid, oop obj, ObjectMonitor** freeHeadp,
+                              ObjectMonitor** freeTailp);
   static void oops_do(OopClosure* f);
 
   // debugging
@@ -130,7 +130,10 @@
   enum { _BLOCKSIZE = 128 };
   static ObjectMonitor* gBlockList;
   static ObjectMonitor * volatile gFreeList;
-  static ObjectMonitor * volatile gOmInUseList; // for moribund thread, so monitors they inflated still get scanned
+  // global monitor in use list, for moribund threads,
+  // monitors they inflated need to be scanned for deflation
+  static ObjectMonitor * volatile gOmInUseList;
+  // count of entries in gOmInUseList
   static int gOmInUseCount;
 
 };
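
    The expanded gOmInUseList comment above explains why a dying thread's in-use monitors are handed
    to a global list: they still have to be visited by the deflation scan. A rough sketch of that
    hand-off (splicing a per-thread singly-linked list onto a global one under a lock) is shown below;
    the types, the mutex, and the function name are assumptions for illustration, not HotSpot code.

    #include <mutex>

    struct Monitor {
      Monitor* FreeNext;
    };

    // Stand-ins for gOmInUseList / gOmInUseCount and the global list lock.
    static Monitor*   gOmInUseList  = nullptr;
    static int        gOmInUseCount = 0;
    static std::mutex gListLock;

    // Sketch of the omFlush() idea: a moribund thread donates its whole in-use
    // list to the global list so the deflation scan keeps seeing those monitors.
    void flush_in_use_list(Monitor*& threadList, int& threadCount) {
      if (threadList == nullptr) return;

      // Find the tail so the whole chain can be spliced in one step.
      Monitor* tail = threadList;
      while (tail->FreeNext != nullptr) {
        tail = tail->FreeNext;
      }

      std::lock_guard<std::mutex> guard(gListLock);
      tail->FreeNext  = gOmInUseList;   // existing global list becomes the suffix
      gOmInUseList    = threadList;     // donated chain becomes the new head
      gOmInUseCount  += threadCount;

      threadList  = nullptr;            // the thread no longer owns these monitors
      threadCount = 0;
    }

    int main() {
      Monitor m2 = { nullptr };
      Monitor m1 = { &m2 };
      Monitor* mine = &m1;
      int count = 2;
      flush_in_use_list(mine, count);
      return gOmInUseCount == 2 ? 0 : 1;
    }
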
--- a/hotspot/src/share/vm/runtime/task.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/task.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,18 +28,6 @@
 #include "runtime/task.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 int PeriodicTask::_num_tasks = 0;
 PeriodicTask* PeriodicTask::_tasks[PeriodicTask::max_tasks];
--- a/hotspot/src/share/vm/runtime/thread.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -85,18 +85,6 @@
 #include "utilities/events.hpp"
 #include "utilities/preserveException.hpp"
 #include "utilities/macros.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
@@ -4111,8 +4099,8 @@
   SharedHeap* sh = SharedHeap::heap();
   // Cannot yet substitute active_workers for n_par_threads
   // because of G1CollectedHeap::verify() use of
-  // SharedHeap::process_strong_roots().  n_par_threads == 0 will
-  // turn off parallelism in process_strong_roots while active_workers
+  // SharedHeap::process_roots().  n_par_threads == 0 will
+  // turn off parallelism in process_roots while active_workers
   // is being used for parallelism elsewhere.
   bool is_par = sh->n_par_threads() > 0;
   assert(!is_par ||
@@ -4386,7 +4374,7 @@
            if (Yields > 5) {
              os::naked_short_sleep(1);
            } else {
-             os::NakedYield();
+             os::naked_yield();
              ++Yields;
            }
         } else {
@@ -4404,6 +4392,12 @@
   // It's safe if subsequent LDs and STs float "up" into the critical section,
   // but prior LDs and STs within the critical section can't be allowed
   // to reorder or float past the ST that releases the lock.
+  // Loads and stores in the critical section - which appear in program
+  // order before the store that releases the lock - must also appear
+  // before the store that releases the lock in memory visibility order.
+  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
+  // the ST of 0 into the lock-word which releases the lock, so fence
+  // more than covers this on all platforms.
   *adr = 0;
 }
 
@@ -4585,18 +4579,25 @@
 // This implementation pops from the head of the list.  This is unfair,
 // but tends to provide excellent throughput as hot threads remain hot.
 // (We wake recently run threads first).
-
+//
+// All paths through muxRelease() will execute a CAS.
+// Release consistency -- We depend on the CAS in muxRelease() to provide full
+// bidirectional fence/MEMBAR semantics, ensuring that all prior memory operations
+// executed within the critical section are complete and globally visible before the
+// store (CAS) to the lock-word that releases the lock becomes globally visible.
 void Thread::muxRelease (volatile intptr_t * Lock)  {
   for (;;) {
     const intptr_t w = Atomic::cmpxchg_ptr(0, Lock, LOCKBIT);
     assert(w & LOCKBIT, "invariant");
     if (w == LOCKBIT) return;
-    ParkEvent * List = (ParkEvent *)(w & ~LOCKBIT);
+    ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT);
     assert(List != NULL, "invariant");
     assert(List->OnList == intptr_t(Lock), "invariant");
-    ParkEvent * nxt = List->ListNext;
+    ParkEvent * const nxt = List->ListNext;
+    guarantee((intptr_t(nxt) & LOCKBIT) == 0, "invariant");
 
     // The following CAS() releases the lock and pops the head element.
+    // The CAS() also ratifies the previously fetched lock-word value.
     if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
       continue;
     }
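
    The comments added to muxRelease() above make two memory-ordering claims: the uncontended exit
    needs at least a #loadstore|#storestore "release" barrier before the store of 0, and the contended
    exit gets full bidirectional fence semantics for free from the CAS. The stand-alone sketch below
    restates those two cases with C++11 atomics; the lock-word layout and names are simplified
    assumptions, not the HotSpot implementation.

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    static const intptr_t LOCKBIT = 1;
    static std::atomic<intptr_t> lock_word(LOCKBIT);   // LSB set == locked

    // Uncontended exit: a release store is the minimum the comment asks for -
    // loads and stores from the critical section may not be reordered past it.
    void release_uncontended() {
      lock_word.store(0, std::memory_order_release);
    }

    // Contended exit: popping the head waiter with an acq_rel CAS gives the
    // bidirectional fence the muxRelease() comment relies on, so everything done
    // inside the critical section is globally visible before the new lock word.
    bool release_contended(intptr_t expected, intptr_t next_waiter) {
      return lock_word.compare_exchange_strong(expected, next_waiter,
                                               std::memory_order_acq_rel);
    }

    int main() {
      release_uncontended();
      assert(lock_word.load() == 0);

      lock_word.store(LOCKBIT);
      intptr_t w = LOCKBIT;
      bool released = release_contended(w, 0);   // no waiters in this toy example
      assert(released);
      (void)released;
      return 0;
    }
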
--- a/hotspot/src/share/vm/runtime/thread.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -452,7 +452,7 @@
 private:
   bool claim_oops_do_par_case(int collection_parity);
 public:
-  // Requires that "collection_parity" is that of the current strong roots
+  // Requires that "collection_parity" is that of the current roots
   // iteration.  If "is_par" is false, sets the parity of "this" to
   // "collection_parity", and returns "true".  If "is_par" is true,
   // uses an atomic instruction to set the current threads parity to
@@ -1761,34 +1761,6 @@
   return (CompilerThread*)this;
 }
 
-inline bool JavaThread::stack_guard_zone_unused() {
-  return _stack_guard_state == stack_guard_unused;
-}
-
-inline bool JavaThread::stack_yellow_zone_disabled() {
-  return _stack_guard_state == stack_guard_yellow_disabled;
-}
-
-inline bool JavaThread::stack_yellow_zone_enabled() {
-#ifdef ASSERT
-  if (os::uses_stack_guard_pages()) {
-    assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
-  }
-#endif
-    return _stack_guard_state == stack_guard_enabled;
-}
-
-inline size_t JavaThread::stack_available(address cur_sp) {
-  // This code assumes java stacks grow down
-  address low_addr; // Limit on the address for deepest stack depth
-  if (_stack_guard_state == stack_guard_unused) {
-    low_addr =  stack_base() - stack_size();
-  } else {
-    low_addr = stack_yellow_zone_base();
-  }
-  return cur_sp > low_addr ? cur_sp - low_addr : 0;
-}
-
 // A thread used for Compilation.
 class CompilerThread : public JavaThread {
   friend class VMStructs;
@@ -1863,7 +1835,6 @@
   return JavaThread::current()->as_CompilerThread();
 }
 
-
 // The active thread queue. It also keeps track of the current used
 // thread priorities.
 class Threads: AllStatic {
--- a/hotspot/src/share/vm/runtime/thread.inline.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.inline.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #define SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
 
 #include "runtime/atomic.inline.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/thread.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "thread_linux.inline.hpp"
@@ -136,4 +137,32 @@
   OrderAccess::fence();
 }
 
+inline bool JavaThread::stack_guard_zone_unused() {
+  return _stack_guard_state == stack_guard_unused;
+}
+
+inline bool JavaThread::stack_yellow_zone_disabled() {
+  return _stack_guard_state == stack_guard_yellow_disabled;
+}
+
+inline size_t JavaThread::stack_available(address cur_sp) {
+  // This code assumes java stacks grow down
+  address low_addr; // Limit on the address for deepest stack depth
+  if (_stack_guard_state == stack_guard_unused) {
+    low_addr =  stack_base() - stack_size();
+  } else {
+    low_addr = stack_yellow_zone_base();
+  }
+  return cur_sp > low_addr ? cur_sp - low_addr : 0;
+}
+
+inline bool JavaThread::stack_yellow_zone_enabled() {
+#ifdef ASSERT
+  if (os::uses_stack_guard_pages()) {
+    assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
+  }
+#endif
+  return _stack_guard_state == stack_guard_enabled;
+}
+
 #endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP
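
    stack_available() above subtracts the deepest usable address from the current stack pointer,
    assuming a downward-growing stack. A tiny numeric illustration of that arithmetic follows, with
    made-up addresses standing in for real thread state.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Made-up values standing in for stack_base(), stack_size() and cur_sp.
      uintptr_t stack_base = 0x7f0000100000ULL;        // highest address of the stack
      size_t    stack_size = 1024 * 1024;              // 1 MB mapping
      uintptr_t low_addr   = stack_base - stack_size;  // limit when guard pages are unused
      uintptr_t cur_sp     = stack_base - 64 * 1024;   // 64 KB of stack already used

      size_t available = cur_sp > low_addr ? (size_t)(cur_sp - low_addr) : 0;
      printf("available stack: %zu bytes\n", available);  // prints 983040 (960 KB)
      return 0;
    }
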
--- a/hotspot/src/share/vm/runtime/threadLocalStorage.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/threadLocalStorage.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,20 +23,9 @@
  */
 
 #include "precompiled.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadLocalStorage.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 // static member initialization
 int ThreadLocalStorage::_thread_index = -1;
--- a/hotspot/src/share/vm/runtime/timer.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/timer.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -26,21 +26,6 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/timer.hpp"
 #include "utilities/ostream.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 double TimeHelper::counter_to_seconds(jlong counter) {
   double count = (double) counter;
--- a/hotspot/src/share/vm/runtime/vframe.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/vframe.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -260,66 +260,156 @@
   return fr().interpreter_frame_method();
 }
 
-StackValueCollection* interpretedVFrame::locals() const {
-  int length = method()->max_locals();
+static StackValue* create_stack_value_from_oop_map(const InterpreterOopMap& oop_mask,
+                                                   int index,
+                                                   const intptr_t* const addr) {
+
+  assert(index >= 0 &&
+         index < oop_mask.number_of_entries(), "invariant");
 
-  if (method()->is_native()) {
-    // If the method is native, max_locals is not telling the truth.
-    // maxlocals then equals the size of parameters
-    length = method()->size_of_parameters();
+  // categorize using oop_mask
+  if (oop_mask.is_oop(index)) {
+    // reference (oop) "r"
+    Handle h(addr != NULL ? (*(oop*)addr) : (oop)NULL);
+    return new StackValue(h);
+  }
+  // value (integer) "v"
+  return new StackValue(addr != NULL ? *addr : 0);
+}
+
+static bool is_in_expression_stack(const frame& fr, const intptr_t* const addr) {
+  assert(addr != NULL, "invariant");
+
+  // Ensure to be 'inside' the expression stack (i.e., addr >= sp for Intel).
+  // In case of exceptions, the expression stack is invalid and the sp
+  // will be reset to express this condition.
+  if (frame::interpreter_frame_expression_stack_direction() > 0) {
+    return addr <= fr.interpreter_frame_tos_address();
   }
 
-  StackValueCollection* result = new StackValueCollection(length);
+  return addr >= fr.interpreter_frame_tos_address();
+}
+
+static void stack_locals(StackValueCollection* result,
+                         int length,
+                         const InterpreterOopMap& oop_mask,
+                         const frame& fr) {
+
+  assert(result != NULL, "invariant");
+
+  for (int i = 0; i < length; ++i) {
+    const intptr_t* const addr = fr.interpreter_frame_local_at(i);
+    assert(addr != NULL, "invariant");
+    assert(addr >= fr.sp(), "must be inside the frame");
+
+    StackValue* const sv = create_stack_value_from_oop_map(oop_mask, i, addr);
+    assert(sv != NULL, "sanity check");
+
+    result->add(sv);
+  }
+}
+
+static void stack_expressions(StackValueCollection* result,
+                              int length,
+                              int max_locals,
+                              const InterpreterOopMap& oop_mask,
+                              const frame& fr) {
+
+  assert(result != NULL, "invariant");
 
-  // Get oopmap describing oops and int for current bci
+  for (int i = 0; i < length; ++i) {
+    const intptr_t* addr = fr.interpreter_frame_expression_stack_at(i);
+    assert(addr != NULL, "invariant");
+    if (!is_in_expression_stack(fr, addr)) {
+      // Need to ensure no bogus escapes.
+      addr = NULL;
+    }
+
+    StackValue* const sv = create_stack_value_from_oop_map(oop_mask,
+                                                           i + max_locals,
+                                                           addr);
+    assert(sv != NULL, "sanity check");
+
+    result->add(sv);
+  }
+}
+
+StackValueCollection* interpretedVFrame::locals() const {
+  return stack_data(false);
+}
+
+StackValueCollection* interpretedVFrame::expressions() const {
+  return stack_data(true);
+}
+
+/*
+ * Worker routine for fetching references and/or values
+ * for a particular bci in the interpretedVFrame.
+ *
+ * Returns data for either "locals" or "expressions",
+ * using bci relative oop_map (oop_mask) information.
+ *
+ * @param expressions  bool switch controlling what data to return
+ *                     (false == locals / true == expression)
+ *
+ */
+StackValueCollection* interpretedVFrame::stack_data(bool expressions) const {
+
   InterpreterOopMap oop_mask;
+  // oopmap for current bci
   if (TraceDeoptimization && Verbose) {
-    // need the current JavaThread and not thread()
     methodHandle m_h(Thread::current(), method());
     OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
   } else {
     method()->mask_for(bci(), &oop_mask);
   }
-  // handle locals
-  for(int i=0; i < length; i++) {
-    // Find stack location
-    intptr_t *addr = locals_addr_at(i);
+
+  const int mask_len = oop_mask.number_of_entries();
+
+  // If the method is native, method()->max_locals() is not telling the truth.
+  // For our purposes, max locals instead equals the size of parameters.
+  const int max_locals = method()->is_native() ?
+    method()->size_of_parameters() : method()->max_locals();
+
+  assert(mask_len >= max_locals, "invariant");
+
+  const int length = expressions ? mask_len - max_locals : max_locals;
+  assert(length >= 0, "invariant");
 
-    // Depending on oop/int put it in the right package
-    StackValue *sv;
-    if (oop_mask.is_oop(i)) {
-      // oop value
-      Handle h(*(oop *)addr);
-      sv = new StackValue(h);
-    } else {
-      // integer
-      sv = new StackValue(*addr);
-    }
-    assert(sv != NULL, "sanity check");
-    result->add(sv);
+  StackValueCollection* const result = new StackValueCollection(length);
+
+  if (0 == length) {
+    return result;
   }
+
+  if (expressions) {
+    stack_expressions(result, length, max_locals, oop_mask, fr());
+  } else {
+    stack_locals(result, length, oop_mask, fr());
+  }
+
+  assert(length == result->size(), "invariant");
+
   return result;
 }
 
 void interpretedVFrame::set_locals(StackValueCollection* values) const {
   if (values == NULL || values->size() == 0) return;
 
-  int length = method()->max_locals();
-  if (method()->is_native()) {
-    // If the method is native, max_locals is not telling the truth.
-    // maxlocals then equals the size of parameters
-    length = method()->size_of_parameters();
-  }
+  // If the method is native, max_locals is not telling the truth.
+  // maxlocals then equals the size of parameters
+  const int max_locals = method()->is_native() ?
+    method()->size_of_parameters() : method()->max_locals();
 
-  assert(length == values->size(), "Mismatch between actual stack format and supplied data");
+  assert(max_locals == values->size(), "Mismatch between actual stack format and supplied data");
 
   // handle locals
-  for (int i = 0; i < length; i++) {
+  for (int i = 0; i < max_locals; i++) {
     // Find stack location
     intptr_t *addr = locals_addr_at(i);
 
     // Depending on oop/int put it in the right package
-    StackValue *sv = values->at(i);
+    const StackValue* const sv = values->at(i);
     assert(sv != NULL, "sanity check");
     if (sv->type() == T_OBJECT) {
       *(oop *) addr = (sv->get_obj())();
@@ -329,61 +419,6 @@
   }
 }
 
-StackValueCollection* interpretedVFrame::expressions() const {
-
-  InterpreterOopMap oop_mask;
-
-  if (!method()->is_native()) {
-    // Get oopmap describing oops and int for current bci
-    if (TraceDeoptimization && Verbose) {
-      // need the current JavaThread and not thread()
-      methodHandle m_h(Thread::current(), method());
-      OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
-    } else {
-      method()->mask_for(bci(), &oop_mask);
-    }
-  }
-
-  // If the bci is a call instruction, i.e. any of the invoke* instructions,
-  // the InterpreterOopMap does not include expression/operand stack liveness
-  // info in the oop_mask/bit_mask. This can lead to a discrepancy of what
-  // is actually on the expression stack compared to what is given by the
-  // oop_map. We need to use the length reported in the oop_map.
-  int length = oop_mask.expression_stack_size();
-
-  assert(fr().interpreter_frame_expression_stack_size() >= length,
-    "error in expression stack!");
-
-  StackValueCollection* result = new StackValueCollection(length);
-
-  if (0 == length) {
-    return result;
-  }
-
-  int nof_locals = method()->max_locals();
-
-  // handle expressions
-  for(int i=0; i < length; i++) {
-    // Find stack location
-    intptr_t *addr = fr().interpreter_frame_expression_stack_at(i);
-
-    // Depending on oop/int put it in the right package
-    StackValue *sv;
-    if (oop_mask.is_oop(i + nof_locals)) {
-      // oop value
-      Handle h(*(oop *)addr);
-      sv = new StackValue(h);
-    } else {
-      // integer
-      sv = new StackValue(*addr);
-    }
-    assert(sv != NULL, "sanity check");
-    result->add(sv);
-  }
-  return result;
-}
-
-
 // ------------- cChunk --------------
 
 entryVFrame::entryVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread)
--- a/hotspot/src/share/vm/runtime/vframe.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/vframe.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -186,7 +186,7 @@
  private:
   static const int bcp_offset;
   intptr_t* locals_addr_at(int offset) const;
-
+  StackValueCollection* stack_data(bool expressions) const;
   // returns where the parameters starts relative to the frame pointer
   int start_of_parameters() const;
 
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -27,21 +27,6 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/virtualspace.hpp"
 #include "services/memTracker.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
--- a/hotspot/src/share/vm/services/heapDumper.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/services/heapDumper.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -33,6 +33,7 @@
 #include "oops/objArrayKlass.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.hpp"
+#include "runtime/os.hpp"
 #include "runtime/reflectionUtils.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vmThread.hpp"
--- a/hotspot/src/share/vm/services/memTracker.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/services/memTracker.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -484,7 +484,7 @@
       // as short sleep.
       os::naked_short_sleep(1);
 #else
-      os::NakedYield();
+      os::naked_yield();
 #endif
     }
   }
--- a/hotspot/src/share/vm/services/memoryService.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/services/memoryService.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -136,7 +136,6 @@
         break;
 #if INCLUDE_ALL_GCS
       case Generation::ParNew:
-      case Generation::ASParNew:
         _minor_gc_manager = MemoryManager::get_parnew_memory_manager();
         break;
 #endif // INCLUDE_ALL_GCS
@@ -268,7 +267,6 @@
 
 #if INCLUDE_ALL_GCS
     case Generation::ParNew:
-    case Generation::ASParNew:
     {
       assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
       // Add a memory pool for each space and young gen doesn't
@@ -300,7 +298,6 @@
 
 #if INCLUDE_ALL_GCS
     case Generation::ConcurrentMarkSweep:
-    case Generation::ASConcurrentMarkSweep:
     {
       assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
       ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*) gen;
@@ -548,23 +545,20 @@
 // GC manager type depends on the type of Generation. Depending on the space
 // availability and vm options the gc uses major gc manager or minor gc
 // manager or both. The type of gc manager depends on the generation kind.
-// For DefNew, ParNew and ASParNew generation doing scavenge gc uses minor
-// gc manager (so _fullGC is set to false ) and for other generation kinds
-// doing mark-sweep-compact uses major gc manager (so _fullGC is set
-// to true).
+// For DefNew and ParNew generation doing scavenge gc uses minor gc manager (so
+// _fullGC is set to false ) and for other generation kinds doing
+// mark-sweep-compact uses major gc manager (so _fullGC is set to true).
 TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause) {
   switch (kind) {
     case Generation::DefNew:
 #if INCLUDE_ALL_GCS
     case Generation::ParNew:
-    case Generation::ASParNew:
 #endif // INCLUDE_ALL_GCS
       _fullGC=false;
       break;
     case Generation::MarkSweepCompact:
 #if INCLUDE_ALL_GCS
     case Generation::ConcurrentMarkSweep:
-    case Generation::ASConcurrentMarkSweep:
 #endif // INCLUDE_ALL_GCS
       _fullGC=true;
       break;
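
    The TraceMemoryManagerStats comment above now lists only DefNew and ParNew as the scavenge kinds
    (minor GC manager, _fullGC = false), with the remaining kinds treated as mark-sweep-compact (major
    GC manager, _fullGC = true). A compact sketch of that classification, using a placeholder enum
    rather than HotSpot's Generation::Name:

    #include <cstdio>

    // Placeholder for Generation::Name after the ASParNew/ASConcurrentMarkSweep
    // kinds were removed; the enumerator set here is illustrative only.
    enum GenerationKind { DefNew, ParNew, MarkSweepCompact, ConcurrentMarkSweep };

    // Mirrors the switch above: scavenging young generations report through the
    // minor GC manager, everything else through the major (full) GC manager.
    bool is_full_gc(GenerationKind kind) {
      switch (kind) {
        case DefNew:
        case ParNew:
          return false;   // minor collection
        case MarkSweepCompact:
        case ConcurrentMarkSweep:
        default:
          return true;    // major collection
      }
    }

    int main() {
      printf("ParNew full GC? %d\n", is_full_gc(ParNew));                           // 0
      printf("ConcurrentMarkSweep full GC? %d\n", is_full_gc(ConcurrentMarkSweep)); // 1
      return 0;
    }
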
--- a/hotspot/src/share/vm/utilities/accessFlags.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/utilities/accessFlags.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,22 +26,6 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "utilities/accessFlags.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
-
 
 void AccessFlags::atomic_set_bits(jint bits) {
   // Atomically update the flags with the bits given
--- a/hotspot/src/share/vm/utilities/array.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/utilities/array.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -305,6 +305,7 @@
   friend class MetadataFactory;
   friend class VMStructs;
   friend class MethodHandleCompiler;           // special case
+  friend class WhiteBox;
 protected:
   int _length;                                 // the number of array elements
   T   _data[1];                                // the array memory
@@ -326,6 +327,31 @@
 
   static size_t byte_sizeof(int length) { return sizeof(Array<T>) + MAX2(length - 1, 0) * sizeof(T); }
 
+  // WhiteBox API helper.
+  // Can't distinguish between array of length 0 and length 1,
+  // will always return 0 in those cases.
+  static int bytes_to_length(size_t bytes)       {
+    assert(is_size_aligned(bytes, BytesPerWord), "Must be, for now");
+
+    if (sizeof(Array<T>) >= bytes) {
+      return 0;
+    }
+
+    size_t left = bytes - sizeof(Array<T>);
+    assert(is_size_aligned(left, sizeof(T)), "Must be");
+
+    size_t elements = left / sizeof(T);
+    assert(elements <= (size_t)INT_MAX, err_msg("number of elements " SIZE_FORMAT " doesn't fit into an int.", elements));
+
+    int length = (int)elements;
+
+    assert((size_t)size(length) * BytesPerWord == bytes,
+        err_msg("Expected: " SIZE_FORMAT " got: " SIZE_FORMAT,
+                bytes, (size_t)size(length) * BytesPerWord));
+
+    return length;
+  }
+
   explicit Array(int length) : _length(length) {
     assert(length >= 0, "illegal length");
   }
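
    The bytes_to_length() comment above notes that lengths 0 and 1 cannot be distinguished. That
    follows directly from the _data[1] trailing member: one element is already reserved inside
    sizeof(Array<T>), so byte_sizeof(0) == byte_sizeof(1). A stand-alone check of just that point,
    using a simplified stand-in for the Array<T> layout rather than the real template:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    // Simplified stand-in for Array<T>: a length field plus one embedded element.
    struct ArrayModel {
      int       _length;
      long long _data[1];
    };

    // Same formula as Array<T>::byte_sizeof() in the hunk above.
    static size_t byte_sizeof(int length) {
      return sizeof(ArrayModel) + std::max(length - 1, 0) * sizeof(long long);
    }

    int main() {
      assert(byte_sizeof(0) == byte_sizeof(1));                      // the 0/1 ambiguity
      assert(byte_sizeof(2) == byte_sizeof(1) + sizeof(long long));  // growth beyond one element
      return 0;
    }
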
--- a/hotspot/src/share/vm/utilities/bitMap.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/utilities/bitMap.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -28,22 +28,6 @@
 #include "runtime/atomic.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/copy.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
-
 
 BitMap::BitMap(bm_word_t* map, idx_t size_in_bits) :
   _map(map), _size(size_in_bits), _map_allocator(false)
--- a/hotspot/src/share/vm/utilities/debug.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/utilities/debug.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -42,6 +42,7 @@
 #include "runtime/atomic.inline.hpp"
 #include "runtime/frame.hpp"
 #include "runtime/java.hpp"
+#include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -52,18 +53,6 @@
 #include "utilities/events.hpp"
 #include "utilities/top.hpp"
 #include "utilities/vmError.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 #ifndef ASSERT
 #  ifdef _DEBUG
--- a/hotspot/src/share/vm/utilities/events.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/utilities/events.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadCritical.hpp"
--- a/hotspot/src/share/vm/utilities/histogram.hpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/utilities/histogram.hpp	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,21 +28,6 @@
 #include "memory/allocation.hpp"
 #include "runtime/os.hpp"
 #include "utilities/growableArray.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 // This class provides a framework for collecting various statistics.
 // The current implementation is oriented towards counting invocations
--- a/hotspot/src/share/vm/utilities/ostream.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/utilities/ostream.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -27,25 +27,11 @@
 #include "gc_implementation/shared/gcId.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/os.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/top.hpp"
 #include "utilities/xmlstream.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 extern "C" void jio_print(const char* s); // Declarationtion of jvm method
 
--- a/hotspot/src/share/vm/utilities/taskqueue.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/utilities/taskqueue.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -142,7 +142,7 @@
 
 void ParallelTaskTerminator::yield() {
   assert(_offered_termination <= _n_threads, "Invariant");
-  os::yield();
+  os::naked_yield();
 }
 
 void ParallelTaskTerminator::sleep(uint millis) {
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Wed Jul 05 19:50:54 2017 +0200
@@ -989,7 +989,6 @@
       if (fd != -1) {
         out.print_raw("# An error report file with more information is saved as:\n# ");
         out.print_raw_cr(buffer);
-        os::set_error_file(buffer);
 
         log.set_fd(fd);
       } else {
--- a/hotspot/test/compiler/6775880/Test.java	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/test/compiler/6775880/Test.java	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
  * @test
  * @bug 6775880
  * @summary EA +DeoptimizeALot: assert(mon_info->owner()->is_locked(),"object must be locked now")
- * @compile -source 1.4 -target 1.4 Test.java
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+DoEscapeAnalysis -XX:+DeoptimizeALot -XX:CompileCommand=exclude,java.lang.AbstractStringBuilder::append Test
  */
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestDeferredRSUpdate.java	Wed Jul 05 19:50:54 2017 +0200
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestDeferredRSUpdate
+ * @bug 8040977
+ * @summary Ensure that running with -XX:-G1DeferredRSUpdate does not crash the VM
+ * @key gc
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestDeferredRSUpdate {
+  public static void main(String[] args) throws Exception {
+    GCTest.main(args);
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+                                                              "-Xmx10M",
+                                                              // G1DeferredRSUpdate is a develop option, but we cannot limit execution of this test to only debug VMs.
+                                                              "-XX:+IgnoreUnrecognizedVMOptions",
+                                                              "-XX:-G1DeferredRSUpdate",
+                                                              GCTest.class.getName());
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldHaveExitValue(0);
+  }
+
+  static class GCTest {
+    private static Object[] garbage = new Object[32];
+
+    public static void main(String [] args) {
+      System.out.println("Creating garbage");
+      // Create 128MB of garbage. This should result in at least one minor GC, with
+      // some objects copied to old gen. As references from old to young are installed,
+      // the crash due to use before initialization occurs.
+      Object prev = null;
+      Object prevPrev = null;
+      for (int i = 0; i < 1024; i++) {
+        Object[] next = new Object[32 * 1024];
+        next[0] = prev;
+        next[1] = prevPrev;
+
+        Object[] cur = (Object[]) garbage[i % garbage.length];
+        if (cur != null) {
+          cur[0] = null;
+          cur[1] = null;
+        }
+        garbage[i % garbage.length] = next;
+
+        prevPrev = prev;
+        prev = next;
+      }
+      System.out.println("Done");
+    }
+  }
+}
--- a/hotspot/test/runtime/6626217/Test6626217.sh	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/test/runtime/6626217/Test6626217.sh	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 # 
-#  Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+#  Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 #  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 #  This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,7 @@
 
 # Compile all the usual suspects, including the default 'many_loader'
 ${CP} many_loader1.java.foo many_loader.java
-${JAVAC} ${TESTJAVACOPTS} -source 1.4 -target 1.4 -Xlint *.java
+${JAVAC} ${TESTJAVACOPTS} -Xlint *.java
 
 # Rename the class files, so the custom loader (and not the system loader) will find it
 ${MV} from_loader2.class from_loader2.impl2
@@ -62,7 +62,7 @@
 # Compile the next version of 'many_loader'
 ${MV} many_loader.class many_loader.impl1
 ${CP} many_loader2.java.foo many_loader.java
-${JAVAC} ${TESTJAVACOPTS} -source 1.4 -target 1.4 -Xlint many_loader.java
+${JAVAC} ${TESTJAVACOPTS} -Xlint many_loader.java
 
 # Rename the class file, so the custom loader (and not the system loader) will find it
 ${MV} many_loader.class many_loader.impl2
--- a/hotspot/test/runtime/8003720/Test8003720.java	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/test/runtime/8003720/Test8003720.java	Wed Jul 05 19:50:54 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
  * @test
  * @bug 8003720
  * @summary Method in interpreter stack frame can be deallocated
- * @compile -XDignore.symbol.file -source 1.7 -target 1.7 Victim.java
+ * @compile -XDignore.symbol.file Victim.java
  * @run main/othervm -Xverify:all -Xint Test8003720
  */
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/verifier/OverriderMsg.java	Wed Jul 05 19:50:54 2017 +0200
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.File;
+import java.io.FileOutputStream;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import static jdk.internal.org.objectweb.asm.Opcodes.*;
+import com.oracle.java.testlibrary.*;
+
+/*
+ * @test OverriderMsg
+ * @bug 8026894
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file OverriderMsg.java
+ * @run main/othervm OverriderMsg
+ */
+
+// This test checks that the super class name is included in the message when
+// a method is detected overriding a final method in its super class.  The
+// asm part of the test creates these two classes:
+//
+//     public class HasFinal {
+//         public final void m(String s) { }
+//     }
+//
+//     public class Overrider extends HasFinal {
+//         public void m(String s) { }
+//         public static void main(String[] args) { }
+//     }
+//
+public class OverriderMsg {
+
+    public static void dump_HasFinal () throws Exception {
+
+        ClassWriter cw = new ClassWriter(0);
+        MethodVisitor mv;
+
+        cw.visit(V1_7, ACC_PUBLIC + ACC_SUPER, "HasFinal", null, "java/lang/Object", null);
+
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V");
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(1, 1);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC + ACC_FINAL, "m", "(Ljava/lang/String;)V", null, null);
+            mv.visitCode();
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(0, 2);
+            mv.visitEnd();
+        }
+        cw.visitEnd();
+        try (FileOutputStream fos = new FileOutputStream(new File("HasFinal.class"))) {
+             fos.write(cw.toByteArray());
+        }
+    }
+
+
+    public static void dump_Overrider () throws Exception {
+
+        ClassWriter cw = new ClassWriter(0);
+        MethodVisitor mv;
+        cw.visit(V1_7, ACC_PUBLIC + ACC_SUPER, "Overrider", null, "HasFinal", null);
+
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn(INVOKESPECIAL, "HasFinal", "<init>", "()V");
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(1, 1);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "m", "(Ljava/lang/String;)V", null, null);
+            mv.visitCode();
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(0, 2);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "main", "([Ljava/lang/String;)V", null, null);
+            mv.visitCode();
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(0, 1);
+            mv.visitEnd();
+        }
+        cw.visitEnd();
+
+        try (FileOutputStream fos = new FileOutputStream(new File("Overrider.class"))) {
+             fos.write(cw.toByteArray());
+        }
+    }
+
+
+    public static void main(String... args) throws Exception {
+        dump_HasFinal();
+        dump_Overrider();
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(true, "-cp", ".",  "Overrider");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain(
+            "java.lang.VerifyError: class Overrider overrides final method HasFinal.m(Ljava/lang/String;)V");
+        output.shouldHaveExitValue(1);
+    }
+
+}
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Jul 18 08:25:58 2014 -0700
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Jul 05 19:50:54 2017 +0200
@@ -142,6 +142,8 @@
 
   // Memory
   public native void readReservedMemory();
+  public native long allocateMetaspace(ClassLoader classLoader, long size);
+  public native void freeMetaspace(ClassLoader classLoader, long addr, long size);
 
   // force Full GC
   public native void fullGC();
--- a/test/Makefile	Fri Jul 18 08:25:58 2014 -0700
+++ b/test/Makefile	Wed Jul 05 19:50:54 2017 +0200
@@ -66,6 +66,32 @@
 hotspot_%:
 	@$(NO_STOPPING)$(call SUBDIR_TEST, $(HOTSPOT_DIR), TEST="$@" $@)
 
+#
+# jtreg_tests
+#
+# Invocation:
+#
+# make jtreg_tests TESTDIRS=<test-dirs>
+#
+# where <test-dirs> is something like '../<component>/test/runtime',
+# <component> in turn being one of the top level directories (for
+# example 'hotspot').
+#
+# The below will strip the path prefix and delegate to the
+# corresponding ../<component>/test/Makefile.
+
+ifneq ($(TESTDIRS),)
+# Extract the component from ../<component>/...
+COMPONENT=$(word 2,$(subst /, ,$(TESTDIRS)))
+
+# Strip off the ../<component>/test prefix and pass the rest as TESTDIRS
+# to the delegate Makefile
+TESTDIRS_TESTS=$(patsubst ../$(COMPONENT)/test/%,%,$(TESTDIRS))
+endif
+
+jtreg_tests:
+	@$(NO_STOPPING)$(call SUBDIR_TEST, $(TOPDIR)/$(COMPONENT), TESTDIRS=$(TESTDIRS_TESTS) $@)
+
 ################################################################
 
 # Phony targets (e.g. these are not filenames)