Merge jdk8-b90
author duke
Wed, 05 Jul 2017 18:55:08 +0200
changeset 17346 f09ab0c41618
parent 17345 32dd92b1907a (diff)
parent 17290 7c16d0c245d7 (current diff)
child 17347 2b231864b7f3
child 17348 12f44049dd4f
child 17352 8b5c1e11c27b
child 17355 4f531492e774
child 17356 ac6d56a603a3
child 17403 038d29f71bbf
child 17504 85e7103d1e79
child 17510 63e3e07dfc0d
child 17512 7344f26d7a42
child 17532 721a9fe55519
child 17541 1b09fbc97609
child 17543 482fdbf61490
child 18062 b16b51f1de87
child 18250 6a8b17cf0cd9
child 18309 2460d62607aa
child 18342 31ddc9c9eb12
child 18362 8d823fbb258c
child 18401 2adf2fde559e
Merge
--- a/.hgtags-top-repo	Wed Jul 05 18:54:28 2017 +0200
+++ b/.hgtags-top-repo	Wed Jul 05 18:55:08 2017 +0200
@@ -210,3 +210,4 @@
 df9b5240f0a76c91cfe1a5b39da4d08df56e05be jdk8-b86
 b9415faa7066a4d3b16d466556d5428446918d95 jdk8-b87
 e1a929afcfc492470d50be0b6b0e8dc77d3760b9 jdk8-b88
+892a0196d10c67f3a12f0eefb0bb536e423d8868 jdk8-b89
--- a/corba/.hgtags	Wed Jul 05 18:54:28 2017 +0200
+++ b/corba/.hgtags	Wed Jul 05 18:55:08 2017 +0200
@@ -210,3 +210,4 @@
 44a8ce4a759f2668ff434661a93ff462ea472478 jdk8-b86
 f1709874d55a06bc3d5dfa02dbcdfbc59f4cba34 jdk8-b87
 4e3a881ebb1ee96ce0872508b0066d74f310dbfa jdk8-b88
+fe4150590ee597f4e125fea950aa3b352622cc2d jdk8-b89
--- a/hotspot/.hgtags	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/.hgtags	Wed Jul 05 18:55:08 2017 +0200
@@ -339,3 +339,5 @@
 c4af77d2045476c56fbf3f914b336bb1b7cd18af hs25-b30
 8482058e74bc8c1a890e6f3be3eff192dba6ce67 jdk8-b88
 4ec91349972255650f97bedfd07e6423e02428cf hs25-b31
+9c1fe0b419b40a9ecdd1653cc9af1b6d67a12c46 jdk8-b89
+69494caf57908ba2c8efa9eaaa472b4d1875588a hs25-b32
--- a/hotspot/agent/src/os/bsd/ps_core.c	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/agent/src/os/bsd/ps_core.c	Wed Jul 05 18:55:08 2017 +0200
@@ -199,10 +199,10 @@
 //---------------------------------------------------------------
 // Part of the class sharing workaround:
 //
-// With class sharing, pages are mapped from classes[_g].jsa file.
+// With class sharing, pages are mapped from classes.jsa file.
 // The read-only class sharing pages are mapped as MAP_SHARED,
 // PROT_READ pages. These pages are not dumped into core dump.
-// With this workaround, these pages are read from classes[_g].jsa.
+// With this workaround, these pages are read from classes.jsa.
 
 // FIXME: !HACK ALERT!
 // The format of sharing achive file header is needed to read shared heap
@@ -298,14 +298,12 @@
   lib_info* lib = ph->libs;
   while (lib != NULL) {
     // we are iterating over shared objects from the core dump. look for
-    // libjvm[_g].so.
+    // libjvm.so.
     const char *jvm_name = 0;
 #ifdef __APPLE__
-    if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0 ||
-        (jvm_name = strstr(lib->name, "/libjvm_g.dylib")) != 0)
+    if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0)
 #else
-    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0 ||
-        (jvm_name = strstr(lib->name, "/libjvm_g.so")) != 0)
+    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0)
 #endif // __APPLE__
     {
       char classes_jsa[PATH_MAX];
@@ -389,7 +387,7 @@
       }
 
       ph->core->classes_jsa_fd = fd;
-      // add read-only maps from classes[_g].jsa to the list of maps
+      // add read-only maps from classes.jsa to the list of maps
       for (m = 0; m < NUM_SHARED_MAPS; m++) {
         if (header._space[m]._read_only) {
           base = (uintptr_t) header._space[m]._base;
--- a/hotspot/agent/src/os/linux/ps_core.c	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/agent/src/os/linux/ps_core.c	Wed Jul 05 18:55:08 2017 +0200
@@ -195,10 +195,10 @@
 //---------------------------------------------------------------
 // Part of the class sharing workaround:
 //
-// With class sharing, pages are mapped from classes[_g].jsa file.
+// With class sharing, pages are mapped from classes.jsa file.
 // The read-only class sharing pages are mapped as MAP_SHARED,
 // PROT_READ pages. These pages are not dumped into core dump.
-// With this workaround, these pages are read from classes[_g].jsa.
+// With this workaround, these pages are read from classes.jsa.
 
 // FIXME: !HACK ALERT!
 // The format of sharing achive file header is needed to read shared heap
@@ -284,10 +284,9 @@
    lib_info* lib = ph->libs;
    while (lib != NULL) {
       // we are iterating over shared objects from the core dump. look for
-      // libjvm[_g].so.
+      // libjvm.so.
       const char *jvm_name = 0;
-      if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0 ||
-          (jvm_name = strstr(lib->name, "/libjvm_g.so")) != 0) {
+      if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0) {
          char classes_jsa[PATH_MAX];
          struct FileMapHeader header;
          size_t n = 0;
@@ -371,7 +370,7 @@
          }
 
          ph->core->classes_jsa_fd = fd;
-         // add read-only maps from classes[_g].jsa to the list of maps
+         // add read-only maps from classes.jsa to the list of maps
          for (m = 0; m < NUM_SHARED_MAPS; m++) {
             if (header._space[m]._read_only) {
                base = (uintptr_t) header._space[m]._base;
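
The hunks above keep the class-sharing workaround but drop the old "_g" debug-build names: read-only pages mapped MAP_SHARED/PROT_READ from classes.jsa are absent from the core dump, so the agent re-reads them from the archive itself. A minimal sketch of that idea, with a hypothetical read_shared_region helper that is not part of this change; it assumes classes_jsa_fd is the already-open archive descriptor and reads a region at the offset recorded in the archive header:

    #include <cstddef>
    #include <sys/types.h>
    #include <unistd.h>

    // Reads 'size' bytes of a read-only shared region that is missing from the
    // core dump, starting at 'file_offset' inside classes.jsa.
    // Returns true only on a complete read.
    static bool read_shared_region(int classes_jsa_fd, off_t file_offset,
                                   void* out_buf, size_t size) {
      char* dst = static_cast<char*>(out_buf);
      size_t done = 0;
      while (done < size) {
        ssize_t n = pread(classes_jsa_fd, dst + done, size - done,
                          file_offset + static_cast<off_t>(done));
        if (n <= 0) {
          return false;   // error or EOF: the region cannot be recovered
        }
        done += static_cast<size_t>(n);
      }
      return true;
    }
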
--- a/hotspot/agent/src/os/solaris/proc/saproc.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/agent/src/os/solaris/proc/saproc.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -589,8 +589,7 @@
   JNIEnv*   env = dbg->env;
   jobject this_obj = dbg->this_obj;
   const char* jvm_name = 0;
-  if ((jvm_name = strstr(obj_name, "libjvm.so")) != NULL ||
-      (jvm_name = strstr(obj_name, "libjvm_g.so")) != NULL) {
+  if ((jvm_name = strstr(obj_name, "libjvm.so")) != NULL) {
     jvm_name = obj_name;
   } else {
     return 0;
@@ -598,7 +597,7 @@
 
   struct ps_prochandle* ph = (struct ps_prochandle*) env->GetLongField(this_obj, p_ps_prochandle_ID);
 
-  // initialize classes[_g].jsa file descriptor field.
+  // initialize classes.jsa file descriptor field.
   dbg->env->SetIntField(this_obj, classes_jsa_fd_ID, -1);
 
   // check whether class sharing is on by reading variable "UseSharedSpaces"
@@ -641,7 +640,7 @@
 
   print_debug("looking for %s\n", classes_jsa);
 
-  // open the classes[_g].jsa
+  // open the classes.jsa
   int fd = libsaproc_open(classes_jsa, O_RDONLY);
   if (fd < 0) {
     char errMsg[ERR_MSG_SIZE];
@@ -651,7 +650,7 @@
     print_debug("opened shared archive file %s\n", classes_jsa);
   }
 
-  // parse classes[_g].jsa
+  // parse classes.jsa
   struct FileMapHeader* pheader = (struct FileMapHeader*) malloc(sizeof(struct FileMapHeader));
   if (pheader == NULL) {
     close(fd);
@@ -798,8 +797,8 @@
   if (! isProcess) {
     /*
      * With class sharing, shared perm. gen heap is allocated in with MAP_SHARED|PROT_READ.
-     * These pages are mapped from the file "classes[_g].jsa". MAP_SHARED pages are not dumped
-     * in Solaris core.To read shared heap pages, we have to read classes[_g].jsa file.
+     * These pages are mapped from the file "classes.jsa". MAP_SHARED pages are not dumped
+     * in Solaris core.To read shared heap pages, we have to read classes.jsa file.
      */
     Pobject_iter(ph, init_classsharing_workaround, &dbg);
     exception = env->ExceptionOccurred();
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java	Wed Jul 05 18:55:08 2017 +0200
@@ -24,20 +24,29 @@
 
 package sun.jvm.hotspot;
 
-import java.io.PrintStream;
-import java.net.*;
-import java.rmi.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.bsd.*;
-import sun.jvm.hotspot.debugger.proc.*;
-import sun.jvm.hotspot.debugger.remote.*;
-import sun.jvm.hotspot.debugger.windbg.*;
-import sun.jvm.hotspot.debugger.linux.*;
-import sun.jvm.hotspot.memory.*;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
+import java.rmi.RemoteException;
+
+import sun.jvm.hotspot.debugger.Debugger;
+import sun.jvm.hotspot.debugger.DebuggerException;
+import sun.jvm.hotspot.debugger.JVMDebugger;
+import sun.jvm.hotspot.debugger.MachineDescription;
+import sun.jvm.hotspot.debugger.MachineDescriptionAMD64;
+import sun.jvm.hotspot.debugger.MachineDescriptionIA64;
+import sun.jvm.hotspot.debugger.MachineDescriptionIntelX86;
+import sun.jvm.hotspot.debugger.MachineDescriptionSPARC32Bit;
+import sun.jvm.hotspot.debugger.MachineDescriptionSPARC64Bit;
+import sun.jvm.hotspot.debugger.NoSuchSymbolException;
+import sun.jvm.hotspot.debugger.bsd.BsdDebuggerLocal;
+import sun.jvm.hotspot.debugger.linux.LinuxDebuggerLocal;
+import sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal;
+import sun.jvm.hotspot.debugger.remote.RemoteDebugger;
+import sun.jvm.hotspot.debugger.remote.RemoteDebuggerClient;
+import sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer;
+import sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.utilities.PlatformInfo;
+import sun.jvm.hotspot.utilities.UnsupportedPlatformException;
 
 /** <P> This class wraps much of the basic functionality and is the
  * highest-level factory for VM data structures. It makes it simple
@@ -475,7 +484,7 @@
     }
 
     private void setupJVMLibNamesSolaris() {
-        jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so", "gamma_g" };
+        jvmLibNames = new String[] { "libjvm.so" };
     }
 
     //
@@ -507,7 +516,7 @@
     }
 
     private void setupJVMLibNamesWin32() {
-        jvmLibNames = new String[] { "jvm.dll", "jvm_g.dll" };
+        jvmLibNames = new String[] { "jvm.dll" };
     }
 
     //
@@ -547,7 +556,7 @@
     }
 
     private void setupJVMLibNamesLinux() {
-        jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so" };
+        jvmLibNames = new String[] { "libjvm.so" };
     }
 
     //
@@ -572,7 +581,7 @@
     }
 
     private void setupJVMLibNamesBsd() {
-        jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so" };
+        jvmLibNames = new String[] { "libjvm.so" };
     }
 
     //
@@ -595,7 +604,7 @@
     }
 
     private void setupJVMLibNamesDarwin() {
-        jvmLibNames = new String[] { "libjvm.dylib", "libjvm_g.dylib" };
+        jvmLibNames = new String[] { "libjvm.dylib" };
     }
 
     /** Convenience routine which should be called by per-platform
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/LinuxVtblAccess.java	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/LinuxVtblAccess.java	Wed Jul 05 18:55:08 2017 +0200
@@ -24,9 +24,9 @@
 
 package sun.jvm.hotspot;
 
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.types.basic.*;
+import sun.jvm.hotspot.debugger.SymbolLookup;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.basic.BasicVtblAccess;
 
 public class LinuxVtblAccess extends BasicVtblAccess {
   private String vt;
@@ -35,8 +35,7 @@
                          String[] dllNames) {
     super(symbolLookup, dllNames);
 
-    if (symbolLookup.lookup("libjvm.so", "__vt_10JavaThread") != null ||
-        symbolLookup.lookup("libjvm_g.so", "__vt_10JavaThread") != null) {
+    if (symbolLookup.lookup("libjvm.so", "__vt_10JavaThread") != null) {
        // old C++ ABI
        vt = "__vt_";
     } else {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java	Wed Jul 05 18:55:08 2017 +0200
@@ -24,17 +24,28 @@
 
 package sun.jvm.hotspot.debugger.bsd;
 
-import java.io.*;
-import java.net.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.x86.*;
-import sun.jvm.hotspot.debugger.cdbg.*;
-import sun.jvm.hotspot.utilities.*;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.DebuggerBase;
+import sun.jvm.hotspot.debugger.DebuggerException;
+import sun.jvm.hotspot.debugger.DebuggerUtilities;
+import sun.jvm.hotspot.debugger.MachineDescription;
+import sun.jvm.hotspot.debugger.NotInHeapException;
+import sun.jvm.hotspot.debugger.OopHandle;
+import sun.jvm.hotspot.debugger.ReadResult;
+import sun.jvm.hotspot.debugger.ThreadProxy;
+import sun.jvm.hotspot.debugger.UnalignedAddressException;
+import sun.jvm.hotspot.debugger.UnmappedAddressException;
+import sun.jvm.hotspot.debugger.cdbg.CDebugger;
+import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol;
+import sun.jvm.hotspot.debugger.cdbg.LoadObject;
+import sun.jvm.hotspot.runtime.JavaThread;
+import sun.jvm.hotspot.runtime.Threads;
 import sun.jvm.hotspot.runtime.VM;
-import sun.jvm.hotspot.runtime.Threads;
-import sun.jvm.hotspot.runtime.JavaThread;
-import java.lang.reflect.*;
+import sun.jvm.hotspot.utilities.PlatformInfo;
 
 /** <P> An implementation of the JVMDebugger interface. The basic debug
     facilities are implemented through ptrace interface in the JNI code
@@ -246,10 +257,8 @@
     /* called from attach methods */
     private void findABIVersion() throws DebuggerException {
         String libjvmName = isDarwin ? "libjvm.dylib" : "libjvm.so";
-        String libjvm_gName = isDarwin? "libjvm_g.dylib" : "libjvm_g.so";
         String javaThreadVt = isDarwin ? "_vt_10JavaThread" : "__vt_10JavaThread";
-        if (lookupByName0(libjvmName, javaThreadVt) != 0 ||
-            lookupByName0(libjvm_gName, javaThreadVt) != 0) {
+        if (lookupByName0(libjvmName, javaThreadVt) != 0) {
             // old C++ ABI
             useGCC32ABI = false;
         } else {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java	Wed Jul 05 18:55:08 2017 +0200
@@ -24,14 +24,25 @@
 
 package sun.jvm.hotspot.debugger.linux;
 
-import java.io.*;
-import java.net.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.x86.*;
-import sun.jvm.hotspot.debugger.cdbg.*;
-import sun.jvm.hotspot.utilities.*;
-import java.lang.reflect.*;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.DebuggerBase;
+import sun.jvm.hotspot.debugger.DebuggerException;
+import sun.jvm.hotspot.debugger.DebuggerUtilities;
+import sun.jvm.hotspot.debugger.MachineDescription;
+import sun.jvm.hotspot.debugger.NotInHeapException;
+import sun.jvm.hotspot.debugger.OopHandle;
+import sun.jvm.hotspot.debugger.ReadResult;
+import sun.jvm.hotspot.debugger.ThreadProxy;
+import sun.jvm.hotspot.debugger.UnalignedAddressException;
+import sun.jvm.hotspot.debugger.UnmappedAddressException;
+import sun.jvm.hotspot.debugger.cdbg.CDebugger;
+import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol;
+import sun.jvm.hotspot.debugger.cdbg.LoadObject;
+import sun.jvm.hotspot.utilities.PlatformInfo;
 
 /** <P> An implementation of the JVMDebugger interface. The basic debug
     facilities are implemented through ptrace interface in the JNI code
@@ -238,8 +249,7 @@
 
     /* called from attach methods */
     private void findABIVersion() throws DebuggerException {
-        if (lookupByName0("libjvm.so", "__vt_10JavaThread") != 0 ||
-            lookupByName0("libjvm_g.so", "__vt_10JavaThread") != 0) {
+        if (lookupByName0("libjvm.so", "__vt_10JavaThread") != 0) {
             // old C++ ABI
             useGCC32ABI = false;
         } else {
--- a/hotspot/make/excludeSrc.make	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/make/excludeSrc.make	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -81,22 +81,25 @@
 	cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
 	cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp compactibleFreeListSpace.cpp \
 	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
-	freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
-	concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \
-	dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \
-	g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \
-	g1MMUTracker.cpp g1MonitoringSupport.cpp g1RemSet.cpp g1SATBCardTableModRefBS.cpp heapRegion.cpp \
-	heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp ptrQueue.cpp \
-	satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp adjoiningGenerations.cpp \
-	adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp cardTableExtension.cpp \
-	gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp parallelScavengeHeap.cpp parMarkBitMap.cpp \
-	pcTasks.cpp psAdaptiveSizePolicy.cpp psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp \
-	psGenerationCounters.cpp psMarkSweep.cpp psMarkSweepDecorator.cpp psOldGen.cpp psParallelCompact.cpp \
-	psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp psTasks.cpp psVirtualspace.cpp \
-	psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp parCardTableModRefBS.cpp \
-	parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp gSpaceCounters.cpp allocationStats.cpp \
-	spaceCounters.cpp gcAdaptivePolicyCounters.cpp mutableNUMASpace.cpp immutableSpace.cpp \
-	immutableSpace.cpp g1MemoryPool.cpp psMemoryPool.cpp yieldingWorkGroup.cpp g1Log.cpp
+	freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp \
+	collectionSetChooser.cpp concurrentG1Refine.cpp concurrentG1RefineThread.cpp \
+	concurrentMark.cpp concurrentMarkThread.cpp dirtyCardQueue.cpp g1AllocRegion.cpp \
+	g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
+	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
+	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
+	g1RemSet.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
+	heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
+	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
+	adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
+	cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
+	parallelScavengeHeap.cpp parMarkBitMap.cpp pcTasks.cpp psAdaptiveSizePolicy.cpp \
+	psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp psGenerationCounters.cpp \
+	psMarkSweep.cpp psMarkSweepDecorator.cpp psMemoryPool.cpp psOldGen.cpp \
+	psParallelCompact.cpp psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp \
+	psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \
+	parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
+	gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
+	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp
 endif 
 
 ifeq ($(INCLUDE_NMT), false)
--- a/hotspot/make/hotspot_version	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/make/hotspot_version	Wed Jul 05 18:55:08 2017 +0200
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=31
+HS_BUILD_NUMBER=32
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/make/windows/makefiles/compile.make	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/make/windows/makefiles/compile.make	Wed Jul 05 18:55:08 2017 +0200
@@ -52,7 +52,7 @@
 # improving the quality of crash log stack traces involving jvm.dll.
 
 # These are always used in all compiles
-CXX_FLAGS=/nologo /W3 /WX
+CXX_FLAGS=$(EXTRA_CFLAGS) /nologo /W3 /WX
 
 # Let's add debug information when Full Debug Symbols is enabled
 !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
--- a/hotspot/make/windows/makefiles/defs.make	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/make/windows/makefiles/defs.make	Wed Jul 05 18:55:08 2017 +0200
@@ -193,7 +193,7 @@
   MAKE_ARGS += JDK_BUILD_NUMBER=$(COOKED_BUILD_NUMBER)
 endif
 
-NMAKE= MAKEFLAGS= MFLAGS= nmake -NOLOGO
+NMAKE= MAKEFLAGS= MFLAGS= EXTRA_CFLAGS="$(EXTRA_CFLAGS)" nmake -NOLOGO
 ifndef SYSTEM_UNAME
   SYSTEM_UNAME := $(shell uname)
   export SYSTEM_UNAME
--- a/hotspot/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -30,4 +30,6 @@
 
 const int StackAlignmentInBytes = (2*wordSize);
 
+#define SUPPORTS_NATIVE_CX8
+
 #endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/jni_sparc.h	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/jni_sparc.h	Wed Jul 05 18:55:08 2017 +0200
@@ -23,7 +23,12 @@
  * questions.
  */
 
-#if defined(__GNUC__) && (__GNUC__ >= 4)
+// Note: please do not change these without also changing jni_md.h in the JDK
+// repository
+#ifndef __has_attribute
+  #define __has_attribute(x) 0
+#endif
+#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
   #define JNIEXPORT     __attribute__((visibility("default")))
   #define JNIIMPORT     __attribute__((visibility("default")))
 #else
--- a/hotspot/src/cpu/x86/vm/globalDefinitions_x86.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/globalDefinitions_x86.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -27,4 +27,6 @@
 
 const int StackAlignmentInBytes  = 16;
 
+#define SUPPORTS_NATIVE_CX8
+
 #endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
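
SUPPORTS_NATIVE_CX8, now defined for sparc and x86, advertises that the platform has a native 8-byte compare-and-exchange, so 64-bit atomics do not need a lock-based emulation. A hedged sketch of how such a macro typically gates the two paths; this is illustrative C++, not HotSpot's Atomic implementation:

    #include <atomic>
    #include <cstdint>
    #include <mutex>

    // Returns the value seen in 'dest' before the attempted exchange.
    int64_t cmpxchg_long_sketch(std::atomic<int64_t>& dest,
                                int64_t expected, int64_t desired) {
    #ifdef SUPPORTS_NATIVE_CX8
      // Native 8-byte compare-and-exchange: a single instruction on x86/sparc.
      dest.compare_exchange_strong(expected, desired);
      return expected;                 // holds the prior value on failure too
    #else
      // Platforms without cx8 fall back to a lock-based emulation.
      static std::mutex emulation_lock;
      std::lock_guard<std::mutex> guard(emulation_lock);
      int64_t old = dest.load(std::memory_order_relaxed);
      if (old == expected) {
        dest.store(desired, std::memory_order_relaxed);
      }
      return old;
    #endif
    }
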
--- a/hotspot/src/cpu/x86/vm/jni_x86.h	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/jni_x86.h	Wed Jul 05 18:55:08 2017 +0200
@@ -28,7 +28,13 @@
 
 #if defined(SOLARIS) || defined(LINUX) || defined(_ALLBSD_SOURCE)
 
-#if defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2)
+
+// Note: please do not change these without also changing jni_md.h in the JDK
+// repository
+#ifndef __has_attribute
+  #define __has_attribute(x) 0
+#endif
+#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
   #define JNIEXPORT     __attribute__((visibility("default")))
   #define JNIIMPORT     __attribute__((visibility("default")))
 #else
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -212,7 +212,13 @@
 
   // Update the invocation counter
   if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
-    InvocationCounter *counter = method->invocation_counter();
+    MethodCounters* mcs = method->method_counters();
+    if (mcs == NULL) {
+      CALL_VM_NOCHECK(mcs = InterpreterRuntime::build_method_counters(thread, method));
+      if (HAS_PENDING_EXCEPTION)
+        goto unwind_and_return;
+    }
+    InvocationCounter *counter = mcs->invocation_counter();
     counter->increment();
     if (counter->reached_InvocationLimit()) {
       CALL_VM_NOCHECK(
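
The interpreter hunk above reflects the move of the invocation counter from Method into a lazily allocated MethodCounters object: the counters are built on first use, and the call unwinds if allocation fails. A minimal sketch of that pattern with illustrative names, not the HotSpot API:

    #include <new>

    struct Counters { int invocations = 0; };

    struct Method {
      Counters* counters_ = nullptr;
      Counters* method_counters() { return counters_; }
      Counters* build_method_counters() {          // may fail under memory pressure
        if (counters_ == nullptr) {
          counters_ = new (std::nothrow) Counters();
        }
        return counters_;
      }
    };

    // Returns false when the counters could not be allocated, mirroring the
    // "goto unwind_and_return" path taken above when an exception is pending.
    bool increment_invocation_count(Method* m) {
      Counters* c = m->method_counters();
      if (c == nullptr) {
        c = m->build_method_counters();
        if (c == nullptr) {
          return false;
        }
      }
      ++c->invocations;
      return true;
    }
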
--- a/hotspot/src/cpu/zero/vm/jni_zero.h	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/cpu/zero/vm/jni_zero.h	Wed Jul 05 18:55:08 2017 +0200
@@ -25,7 +25,13 @@
  */
 
 
-#if defined(__GNUC__) && (__GNUC__ >= 4)
+
+// Note: please do not change these without also changing jni_md.h in the JDK
+// repository
+#ifndef __has_attribute
+  #define __has_attribute(x) 0
+#endif
+#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
   #define JNIEXPORT     __attribute__((visibility("default")))
   #define JNIIMPORT     __attribute__((visibility("default")))
 #else
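
The same guard appears in jni_sparc.h, jni_x86.h and jni_zero.h: __has_attribute is a Clang extension, so it must be defined to 0 for compilers that lack it, otherwise the feature test would not even preprocess. A standalone sketch of the pattern using a hypothetical DEMO_EXPORT macro in place of JNIEXPORT:

    // Fallback so the #if below stays well-formed on compilers without
    // __has_attribute (the attribute is simply treated as unsupported).
    #ifndef __has_attribute
      #define __has_attribute(x) 0
    #endif

    #if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
      #define DEMO_EXPORT __attribute__((visibility("default")))
    #else
      #define DEMO_EXPORT
    #endif

    // Exported with default visibility when the toolchain supports it.
    DEMO_EXPORT int demo_add(int a, int b) { return a + b; }
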
--- a/hotspot/src/share/vm/classfile/bytecodeAssembler.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/classfile/bytecodeAssembler.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -75,8 +75,8 @@
     int idx = i + _orig->length();
     switch (entry._tag) {
       case BytecodeCPEntry::UTF8:
+        entry._u.utf8->increment_refcount();
         cp->symbol_at_put(idx, entry._u.utf8);
-        entry._u.utf8->increment_refcount();
         break;
       case BytecodeCPEntry::KLASS:
         cp->unresolved_klass_at_put(
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -830,7 +830,7 @@
             Klass *kk;
             {
               MutexLocker mu(SystemDictionary_lock, THREAD);
-              kk = find_class(name, ik->class_loader_data());
+              kk = find_class(d_index, d_hash, name, ik->class_loader_data());
             }
             if (kk != NULL) {
               // No clean up is needed if the shared class has been entered
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -517,13 +517,18 @@
   template(sun_management_ManagementFactory,           "sun/management/ManagementFactory")                        \
   template(sun_management_Sensor,                      "sun/management/Sensor")                                   \
   template(sun_management_Agent,                       "sun/management/Agent")                                    \
+  template(sun_management_DiagnosticCommandImpl,       "sun/management/DiagnosticCommandImpl")                    \
   template(sun_management_GarbageCollectorImpl,        "sun/management/GarbageCollectorImpl")                     \
+  template(sun_management_ManagementFactoryHelper,     "sun/management/ManagementFactoryHelper")                  \
+  template(getDiagnosticCommandMBean_name,             "getDiagnosticCommandMBean")                               \
+  template(getDiagnosticCommandMBean_signature,        "()Lcom/sun/management/DiagnosticCommandMBean;")           \
   template(getGcInfoBuilder_name,                      "getGcInfoBuilder")                                        \
   template(getGcInfoBuilder_signature,                 "()Lsun/management/GcInfoBuilder;")                        \
   template(com_sun_management_GcInfo,                  "com/sun/management/GcInfo")                               \
   template(com_sun_management_GcInfo_constructor_signature, "(Lsun/management/GcInfoBuilder;JJJ[Ljava/lang/management/MemoryUsage;[Ljava/lang/management/MemoryUsage;[Ljava/lang/Object;)V") \
   template(createGCNotification_name,                  "createGCNotification")                                    \
   template(createGCNotification_signature,             "(JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/sun/management/GcInfo;)V") \
+  template(createDiagnosticFrameworkNotification_name, "createDiagnosticFrameworkNotification")                   \
   template(createMemoryPoolMBean_name,                 "createMemoryPoolMBean")                                   \
   template(createMemoryManagerMBean_name,              "createMemoryManagerMBean")                                \
   template(createGarbageCollectorMBean_name,           "createGarbageCollectorMBean")                             \
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -193,7 +193,8 @@
      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
   CardGeneration(rs, initial_byte_size, level, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
-  _debug_collection_type(Concurrent_collection_type)
+  _debug_collection_type(Concurrent_collection_type),
+  _did_compact(false)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
   HeapWord* end    = (HeapWord*) _virtual_space.high();
@@ -917,18 +918,15 @@
     return;
   }
 
-  // Compute some numbers about the state of the heap.
-  const size_t used_after_gc = used();
-  const size_t capacity_after_gc = capacity();
+  // The heap has been compacted but not reset yet.
+  // Any metric such as free() or used() will be incorrect.
 
   CardGeneration::compute_new_size();
 
   // Reset again after a possible resizing
-  cmsSpace()->reset_after_compaction();
-
-  assert(used() == used_after_gc && used_after_gc <= capacity(),
-         err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
-         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
+  if (did_compact()) {
+    cmsSpace()->reset_after_compaction();
+  }
 }
 
 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
@@ -1578,6 +1576,8 @@
   return false;
 }
 
+void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
+
 // Clear _expansion_cause fields of constituent generations
 void CMSCollector::clear_expansion_cause() {
   _cmsGen->clear_expansion_cause();
@@ -1675,7 +1675,6 @@
   }
   acquire_control_and_collect(full, clear_all_soft_refs);
   _full_gcs_since_conc_gc++;
-
 }
 
 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
@@ -1857,6 +1856,7 @@
     }
   }
 
+  set_did_compact(should_compact);
   if (should_compact) {
     // If the collection is being acquired from the background
     // collector, there may be references on the discovered
@@ -2718,6 +2718,7 @@
     Chunk::clean_chunk_pool();
   }
 
+  set_did_compact(false);
   _between_prologue_and_epilogue = false;  // ready for next cycle
 }
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -604,6 +604,8 @@
   ConcurrentMarkSweepPolicy* _collector_policy;
   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
 
+  void set_did_compact(bool v);
+
   // XXX Move these to CMSStats ??? FIX ME !!!
   elapsedTimer _inter_sweep_timer;   // time between sweeps
   elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
@@ -1081,6 +1083,10 @@
 
   CollectionTypes _debug_collection_type;
 
+  // True if a compacting collection was done.
+  bool _did_compact;
+  bool did_compact() { return _did_compact; }
+
   // Fraction of current occupancy at which to start a CMS collection which
   // will collect this generation (at least).
   double _initiating_occupancy;
@@ -1121,6 +1127,8 @@
   // Adaptive size policy
   CMSAdaptiveSizePolicy* size_policy();
 
+  void set_did_compact(bool v) { _did_compact = v; }
+
   bool refs_discovery_is_atomic() const { return false; }
   bool refs_discovery_is_mt()     const {
     // Note: CMS does MT-discovery during the parallel-remark
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,40 +26,12 @@
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/g1CollectorPolicy.hpp"
-#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
-#include "gc_implementation/g1/g1RemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
-#include "memory/space.inline.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/java.hpp"
-#include "utilities/copy.hpp"
-
-// Possible sizes for the card counts cache: odd primes that roughly double in size.
-// (See jvmtiTagMap.cpp).
-
-#define MAX_SIZE ((size_t) -1)
+#include "gc_implementation/g1/g1HotCardCache.hpp"
 
-size_t ConcurrentG1Refine::_cc_cache_sizes[] = {
-          16381,    32771,    76831,    150001,   307261,
-         614563,  1228891,  2457733,   4915219,  9830479,
-       19660831, 39321619, 78643219, 157286461,  MAX_SIZE
-  };
-
-ConcurrentG1Refine::ConcurrentG1Refine() :
-  _card_counts(NULL), _card_epochs(NULL),
-  _n_card_counts(0), _max_cards(0), _max_n_card_counts(0),
-  _cache_size_index(0), _expand_card_counts(false),
-  _hot_cache(NULL),
-  _def_use_cache(false), _use_cache(false),
-  // We initialize the epochs of the array to 0. By initializing
-  // _n_periods to 1 and not 0 we automatically invalidate all the
-  // entries on the array. Otherwise we might accidentally think that
-  // we claimed a card that was in fact never set (see CR7033292).
-  _n_periods(1),
-  _threads(NULL), _n_threads(0)
+ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
+  _threads(NULL), _n_threads(0),
+  _hot_card_cache(g1h)
 {
-
   // Ergomonically select initial concurrent refinement parameters
   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
     FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
@@ -75,13 +47,17 @@
     FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
   }
   set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
+
   _n_worker_threads = thread_num();
   // We need one extra thread to do the young gen rset size sampling.
   _n_threads = _n_worker_threads + 1;
+
   reset_threshold_step();
 
   _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC);
+
   int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
+
   ConcurrentG1RefineThread *next = NULL;
   for (int i = _n_threads - 1; i >= 0; i--) {
     ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
@@ -100,74 +76,8 @@
   }
 }
 
-int ConcurrentG1Refine::thread_num() {
-  return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
-}
-
 void ConcurrentG1Refine::init() {
-  if (G1ConcRSLogCacheSize > 0) {
-    _g1h = G1CollectedHeap::heap();
-
-    _max_cards = _g1h->max_capacity() >> CardTableModRefBS::card_shift;
-    _max_n_card_counts = _max_cards * G1MaxHotCardCountSizePercent / 100;
-
-    size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
-    guarantee(_max_cards < max_card_num, "card_num representation");
-
-    // We need _n_card_counts to be less than _max_n_card_counts here
-    // so that the expansion call (below) actually allocates the
-    // _counts and _epochs arrays.
-    assert(_n_card_counts == 0, "pre-condition");
-    assert(_max_n_card_counts > 0, "pre-condition");
-
-    // Find the index into cache size array that is of a size that's
-    // large enough to hold desired_sz.
-    size_t desired_sz = _max_cards / InitialCacheFraction;
-    int desired_sz_index = 0;
-    while (_cc_cache_sizes[desired_sz_index] < desired_sz) {
-      desired_sz_index += 1;
-      assert(desired_sz_index <  MAX_CC_CACHE_INDEX, "invariant");
-    }
-    assert(desired_sz_index <  MAX_CC_CACHE_INDEX, "invariant");
-
-    // If the desired_sz value is between two sizes then
-    // _cc_cache_sizes[desired_sz_index-1] < desired_sz <= _cc_cache_sizes[desired_sz_index]
-    // we will start with the lower size in the optimistic expectation that
-    // we will not need to expand up. Note desired_sz_index could also be 0.
-    if (desired_sz_index > 0 &&
-        _cc_cache_sizes[desired_sz_index] > desired_sz) {
-      desired_sz_index -= 1;
-    }
-
-    if (!expand_card_count_cache(desired_sz_index)) {
-      // Allocation was unsuccessful - exit
-      vm_exit_during_initialization("Could not reserve enough space for card count cache");
-    }
-    assert(_n_card_counts > 0, "post-condition");
-    assert(_cache_size_index == desired_sz_index, "post-condition");
-
-    Copy::fill_to_bytes(&_card_counts[0],
-                        _n_card_counts * sizeof(CardCountCacheEntry));
-    Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
-
-    ModRefBarrierSet* bs = _g1h->mr_bs();
-    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
-    _ct_bs = (CardTableModRefBS*)bs;
-    _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
-
-    _def_use_cache = true;
-    _use_cache = true;
-    _hot_cache_size = (1 << G1ConcRSLogCacheSize);
-    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
-    _n_hot = 0;
-    _hot_cache_idx = 0;
-
-    // For refining the cards in the hot cache in parallel
-    int n_workers = (ParallelGCThreads > 0 ?
-                        _g1h->workers()->total_workers() : 1);
-    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
-    _hot_cache_par_claimed_idx = 0;
-  }
+  _hot_card_cache.initialize();
 }
 
 void ConcurrentG1Refine::stop() {
@@ -188,17 +98,6 @@
 }
 
 ConcurrentG1Refine::~ConcurrentG1Refine() {
-  if (G1ConcRSLogCacheSize > 0) {
-    // Please see the comment in allocate_card_count_cache
-    // for why we call os::malloc() and os::free() directly.
-    assert(_card_counts != NULL, "Logic");
-    os::free(_card_counts, mtGC);
-    assert(_card_epochs != NULL, "Logic");
-    os::free(_card_epochs, mtGC);
-
-    assert(_hot_cache != NULL, "Logic");
-    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
-  }
   if (_threads != NULL) {
     for (int i = 0; i < _n_threads; i++) {
       delete _threads[i];
@@ -215,317 +114,10 @@
   }
 }
 
-bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {
-  HeapWord* start = _ct_bs->addr_for(card_ptr);
-  HeapRegion* r = _g1h->heap_region_containing(start);
-  if (r != NULL && r->is_young()) {
-    return true;
-  }
-  // This card is not associated with a heap region
-  // so can't be young.
-  return false;
-}
-
-jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* defer) {
-  unsigned new_card_num = ptr_2_card_num(card_ptr);
-  unsigned bucket = hash(new_card_num);
-  assert(0 <= bucket && bucket < _n_card_counts, "Bounds");
-
-  CardCountCacheEntry* count_ptr = &_card_counts[bucket];
-  CardEpochCacheEntry* epoch_ptr = &_card_epochs[bucket];
-
-  // We have to construct a new entry if we haven't updated the counts
-  // during the current period, or if the count was updated for a
-  // different card number.
-  unsigned int new_epoch = (unsigned int) _n_periods;
-  julong new_epoch_entry = make_epoch_entry(new_card_num, new_epoch);
-
-  while (true) {
-    // Fetch the previous epoch value
-    julong prev_epoch_entry = epoch_ptr->_value;
-    julong cas_res;
-
-    if (extract_epoch(prev_epoch_entry) != new_epoch) {
-      // This entry has not yet been updated during this period.
-      // Note: we update the epoch value atomically to ensure
-      // that there is only one winner that updates the cached
-      // card_ptr value even though all the refine threads share
-      // the same epoch value.
-
-      cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
-                                         (volatile jlong*)&epoch_ptr->_value,
-                                         (jlong) prev_epoch_entry);
-
-      if (cas_res == prev_epoch_entry) {
-        // We have successfully won the race to update the
-        // epoch and card_num value. Make it look like the
-        // count and eviction count were previously cleared.
-        count_ptr->_count = 1;
-        count_ptr->_evict_count = 0;
-        *count = 0;
-        // We can defer the processing of card_ptr
-        *defer = true;
-        return card_ptr;
-      }
-      // We did not win the race to update the epoch field, so some other
-      // thread must have done it. The value that gets returned by CAS
-      // should be the new epoch value.
-      assert(extract_epoch(cas_res) == new_epoch, "unexpected epoch");
-      // We could 'continue' here or just re-read the previous epoch value
-      prev_epoch_entry = epoch_ptr->_value;
-    }
-
-    // The epoch entry for card_ptr has been updated during this period.
-    unsigned old_card_num = extract_card_num(prev_epoch_entry);
-
-    // The card count that will be returned to caller
-    *count = count_ptr->_count;
-
-    // Are we updating the count for the same card?
-    if (new_card_num == old_card_num) {
-      // Same card - just update the count. We could have more than one
-      // thread racing to update count for the current card. It should be
-      // OK not to use a CAS as the only penalty should be some missed
-      // increments of the count which delays identifying the card as "hot".
-
-      if (*count < max_jubyte) count_ptr->_count++;
-      // We can defer the processing of card_ptr
-      *defer = true;
-      return card_ptr;
-    }
-
-    // Different card - evict old card info
-    if (count_ptr->_evict_count < max_jubyte) count_ptr->_evict_count++;
-    if (count_ptr->_evict_count > G1CardCountCacheExpandThreshold) {
-      // Trigger a resize the next time we clear
-      _expand_card_counts = true;
-    }
-
-    cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
-                                       (volatile jlong*)&epoch_ptr->_value,
-                                       (jlong) prev_epoch_entry);
-
-    if (cas_res == prev_epoch_entry) {
-      // We successfully updated the card num value in the epoch entry
-      count_ptr->_count = 0; // initialize counter for new card num
-      jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
-
-      // Even though the region containg the card at old_card_num was not
-      // in the young list when old_card_num was recorded in the epoch
-      // cache it could have been added to the free list and subsequently
-      // added to the young list in the intervening time. See CR 6817995.
-      // We do not deal with this case here - it will be handled in
-      // HeapRegion::oops_on_card_seq_iterate_careful after it has been
-      // determined that the region containing the card has been allocated
-      // to, and it's safe to check the young type of the region.
-
-      // We do not want to defer processing of card_ptr in this case
-      // (we need to refine old_card_ptr and card_ptr)
-      *defer = false;
-      return old_card_ptr;
-    }
-    // Someone else beat us - try again.
-  }
-}
-
-jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
-  int count;
-  jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
-  assert(cached_ptr != NULL, "bad cached card ptr");
-
-  // We've just inserted a card pointer into the card count cache
-  // and got back the card that we just inserted or (evicted) the
-  // previous contents of that count slot.
-
-  // The card we got back could be in a young region. When the
-  // returned card (if evicted) was originally inserted, we had
-  // determined that its containing region was not young. However
-  // it is possible for the region to be freed during a cleanup
-  // pause, then reallocated and tagged as young which will result
-  // in the returned card residing in a young region.
-  //
-  // We do not deal with this case here - the change from non-young
-  // to young could be observed at any time - it will be handled in
-  // HeapRegion::oops_on_card_seq_iterate_careful after it has been
-  // determined that the region containing the card has been allocated
-  // to.
-
-  // The card pointer we obtained from card count cache is not hot
-  // so do not store it in the cache; return it for immediate
-  // refining.
-  if (count < G1ConcRSHotCardLimit) {
-    return cached_ptr;
-  }
-
-  // Otherwise, the pointer we got from the _card_counts cache is hot.
-  jbyte* res = NULL;
-  MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
-  if (_n_hot == _hot_cache_size) {
-    res = _hot_cache[_hot_cache_idx];
-    _n_hot--;
-  }
-  // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
-  _hot_cache[_hot_cache_idx] = cached_ptr;
-  _hot_cache_idx++;
-  if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
-  _n_hot++;
-
-  // The card obtained from the hot card cache could be in a young
-  // region. See above on how this can happen.
-
-  return res;
-}
-
-void ConcurrentG1Refine::clean_up_cache(int worker_i,
-                                        G1RemSet* g1rs,
-                                        DirtyCardQueue* into_cset_dcq) {
-  assert(!use_cache(), "cache should be disabled");
-  int start_idx;
-
-  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
-    int end_idx = start_idx + _hot_cache_par_chunk_size;
-
-    if (start_idx ==
-        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
-      // The current worker has successfully claimed the chunk [start_idx..end_idx)
-      end_idx = MIN2(end_idx, _n_hot);
-      for (int i = start_idx; i < end_idx; i++) {
-        jbyte* entry = _hot_cache[i];
-        if (entry != NULL) {
-          if (g1rs->concurrentRefineOneCard(entry, worker_i, true)) {
-            // 'entry' contains references that point into the current
-            // collection set. We need to record 'entry' in the DCQS
-            // that's used for that purpose.
-            //
-            // The only time we care about recording cards that contain
-            // references that point into the collection set is during
-            // RSet updating while within an evacuation pause.
-            // In this case worker_i should be the id of a GC worker thread
-            assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
-            assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
-            into_cset_dcq->enqueue(entry);
-          }
-        }
-      }
-    }
-  }
-}
-
-// The arrays used to hold the card counts and the epochs must have
-// a 1:1 correspondence. Hence they are allocated and freed together
-// Returns true if the allocations of both the counts and epochs
-// were successful; false otherwise.
-bool ConcurrentG1Refine::allocate_card_count_cache(size_t n,
-                                                   CardCountCacheEntry** counts,
-                                                   CardEpochCacheEntry** epochs) {
-  // We call the allocation/free routines directly for the counts
-  // and epochs arrays. The NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY
-  // macros call AllocateHeap and FreeHeap respectively.
-  // AllocateHeap will call vm_exit_out_of_memory in the event
-  // of an allocation failure and abort the JVM. With the
-  // _counts/epochs arrays we only need to abort the JVM if the
-  // initial allocation of these arrays fails.
-  //
-  // Additionally AllocateHeap/FreeHeap do some tracing of
-  // allocate/free calls so calling one without calling the
-  // other can cause inconsistencies in the tracing. So we
-  // call neither.
-
-  assert(*counts == NULL, "out param");
-  assert(*epochs == NULL, "out param");
-
-  size_t counts_size = n * sizeof(CardCountCacheEntry);
-  size_t epochs_size = n * sizeof(CardEpochCacheEntry);
-
-  *counts = (CardCountCacheEntry*) os::malloc(counts_size, mtGC);
-  if (*counts == NULL) {
-    // allocation was unsuccessful
-    return false;
-  }
-
-  *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size, mtGC);
-  if (*epochs == NULL) {
-    // allocation was unsuccessful - free counts array
-    assert(*counts != NULL, "must be");
-    os::free(*counts, mtGC);
-    *counts = NULL;
-    return false;
-  }
-
-  // We successfully allocated both counts and epochs
-  return true;
-}
-
-// Returns true if the card counts/epochs cache was
-// successfully expanded; false otherwise.
-bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) {
-  // Can we expand the card count and epoch tables?
-  if (_n_card_counts < _max_n_card_counts) {
-    assert(cache_size_idx >= 0 && cache_size_idx  < MAX_CC_CACHE_INDEX, "oob");
-
-    size_t cache_size = _cc_cache_sizes[cache_size_idx];
-    // Make sure we don't go bigger than we will ever need
-    cache_size = MIN2(cache_size, _max_n_card_counts);
-
-    // Should we expand the card count and card epoch tables?
-    if (cache_size > _n_card_counts) {
-      // We have been asked to allocate new, larger, arrays for
-      // the card counts and the epochs. Attempt the allocation
-      // of both before we free the existing arrays in case
-      // the allocation is unsuccessful...
-      CardCountCacheEntry* counts = NULL;
-      CardEpochCacheEntry* epochs = NULL;
-
-      if (allocate_card_count_cache(cache_size, &counts, &epochs)) {
-        // Allocation was successful.
-        // We can just free the old arrays; we're
-        // not interested in preserving the contents
-        if (_card_counts != NULL) os::free(_card_counts, mtGC);
-        if (_card_epochs != NULL) os::free(_card_epochs, mtGC);
-
-        // Cache the size of the arrays and the index that got us there.
-        _n_card_counts = cache_size;
-        _cache_size_index = cache_size_idx;
-
-        _card_counts = counts;
-        _card_epochs = epochs;
-
-        // We successfully allocated/expanded the caches.
-        return true;
-      }
-    }
-  }
-
-  // We did not successfully expand the caches.
-  return false;
-}
-
-void ConcurrentG1Refine::clear_and_record_card_counts() {
-  if (G1ConcRSLogCacheSize == 0) {
-    return;
-  }
-
-  double start = os::elapsedTime();
-
-  if (_expand_card_counts) {
-    int new_idx = _cache_size_index + 1;
-
-    if (expand_card_count_cache(new_idx)) {
-      // Allocation was successful and  _n_card_counts has
-      // been updated to the new size. We only need to clear
-      // the epochs so we don't read a bogus epoch value
-      // when inserting a card into the hot card cache.
-      Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
-    }
-    _expand_card_counts = false;
-  }
-
-  int this_epoch = (int) _n_periods;
-  assert((this_epoch+1) <= max_jint, "to many periods");
-  // Update epoch
-  _n_periods++;
-  double cc_clear_time_ms = (os::elapsedTime() - start) * 1000;
-  _g1h->g1_policy()->phase_times()->record_cc_clear_time_ms(cc_clear_time_ms);
+int ConcurrentG1Refine::thread_num() {
+  int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
+                                                : ParallelGCThreads;
+  return MAX2<int>(n_threads, 1);
 }
 
 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
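
This diff removes the card-count/epoch cache from ConcurrentG1Refine; the class now owns a _hot_card_cache member (new G1HotCardCache and G1CardCounts classes) and delegates to it. A rough sketch of the hot-card-cache idea with illustrative types, not the new API: a fixed-size ring buffer that defers refinement of frequently dirtied cards and hands back any evicted entry for immediate refinement.

    #include <cstddef>
    #include <vector>

    class HotCardCacheSketch {
      std::vector<void*> buf_;   // deferred "hot" card pointers
      size_t next_ = 0;          // slot to write next
      size_t count_ = 0;         // number of live entries
     public:
      explicit HotCardCacheSketch(size_t capacity) : buf_(capacity, nullptr) {}

      // Returns nullptr if the card was absorbed into the cache; otherwise the
      // evicted card, which the caller must refine right away.
      void* insert(void* card) {
        void* evicted = nullptr;
        if (count_ == buf_.size()) {
          evicted = buf_[next_];
        } else {
          ++count_;
        }
        buf_[next_] = card;
        next_ = (next_ + 1) % buf_.size();
        return evicted;
      }
    };
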
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,13 +25,15 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
 
+#include "gc_implementation/g1/g1HotCardCache.hpp"
 #include "memory/allocation.hpp"
-#include "memory/cardTableModRefBS.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 // Forward decl
 class ConcurrentG1RefineThread;
+class G1CollectedHeap;
+class G1HotCardCache;
 class G1RemSet;
 
 class ConcurrentG1Refine: public CHeapObj<mtGC> {
@@ -61,141 +63,14 @@
 
   int _thread_threshold_step;
 
+  // We delay the refinement of 'hot' cards using the hot card cache.
+  G1HotCardCache _hot_card_cache;
+
   // Reset the threshold step value based of the current zone boundaries.
   void reset_threshold_step();
 
-  // The cache for card refinement.
-  bool   _use_cache;
-  bool   _def_use_cache;
-
-  size_t _n_periods;    // Used as clearing epoch
-
-  // An evicting cache of the number of times each card
-  // is accessed. Reduces, but does not eliminate, the amount
-  // of duplicated processing of dirty cards.
-
-  enum SomePrivateConstants {
-    epoch_bits           = 32,
-    card_num_shift       = epoch_bits,
-    epoch_mask           = AllBits,
-    card_num_mask        = AllBits,
-
-    // The initial cache size is approximately this fraction
-    // of a maximal cache (i.e. the size needed for all cards
-    // in the heap)
-    InitialCacheFraction = 512
-  };
-
-  const static julong card_num_mask_in_place =
-                        (julong) card_num_mask << card_num_shift;
-
-  typedef struct {
-    julong _value;      // |  card_num   |  epoch   |
-  } CardEpochCacheEntry;
-
-  julong make_epoch_entry(unsigned int card_num, unsigned int epoch) {
-    assert(0 <= card_num && card_num < _max_cards, "Bounds");
-    assert(0 <= epoch && epoch <= _n_periods, "must be");
-
-    return ((julong) card_num << card_num_shift) | epoch;
-  }
-
-  unsigned int extract_epoch(julong v) {
-    return (v & epoch_mask);
-  }
-
-  unsigned int extract_card_num(julong v) {
-    return (v & card_num_mask_in_place) >> card_num_shift;
-  }
-
-  typedef struct {
-    unsigned char _count;
-    unsigned char _evict_count;
-  } CardCountCacheEntry;
-
-  CardCountCacheEntry* _card_counts;
-  CardEpochCacheEntry* _card_epochs;
-
-  // The current number of buckets in the card count cache
-  size_t _n_card_counts;
-
-  // The number of cards for the entire reserved heap
-  size_t _max_cards;
-
-  // The max number of buckets for the card counts and epochs caches.
-  // This is the maximum that the counts and epochs will grow to.
-  // It is specified as a fraction or percentage of _max_cards using
-  // G1MaxHotCardCountSizePercent.
-  size_t _max_n_card_counts;
-
-  // Possible sizes of the cache: odd primes that roughly double in size.
-  // (See jvmtiTagMap.cpp).
-  enum {
-    MAX_CC_CACHE_INDEX = 15    // maximum index into the cache size array.
-  };
-
-  static size_t _cc_cache_sizes[MAX_CC_CACHE_INDEX];
-
-  // The index in _cc_cache_sizes corresponding to the size of
-  // _card_counts.
-  int _cache_size_index;
-
-  bool _expand_card_counts;
-
-  const jbyte* _ct_bot;
-
-  jbyte**      _hot_cache;
-  int          _hot_cache_size;
-  int          _n_hot;
-  int          _hot_cache_idx;
-
-  int          _hot_cache_par_chunk_size;
-  volatile int _hot_cache_par_claimed_idx;
-
-  // Needed to workaround 6817995
-  CardTableModRefBS* _ct_bs;
-  G1CollectedHeap*   _g1h;
-
-  // Helper routine for expand_card_count_cache().
-  // The arrays used to hold the card counts and the epochs must have
-  // a 1:1 correspondence. Hence they are allocated and freed together.
-  // Returns true if the allocations of both the counts and epochs
-  // were successful; false otherwise.
-  bool allocate_card_count_cache(size_t n,
-                                 CardCountCacheEntry** counts,
-                                 CardEpochCacheEntry** epochs);
-
-  // Expands the arrays that hold the card counts and epochs
-  // to the cache size at index. Returns true if the expansion/
-  // allocation was successful; false otherwise.
-  bool expand_card_count_cache(int index);
-
-  // hash a given key (index of card_ptr) with the specified size
-  static unsigned int hash(size_t key, size_t size) {
-    return (unsigned int) (key % size);
-  }
-
-  // hash a given key (index of card_ptr)
-  unsigned int hash(size_t key) {
-    return hash(key, _n_card_counts);
-  }
-
-  unsigned int ptr_2_card_num(jbyte* card_ptr) {
-    return (unsigned int) (card_ptr - _ct_bot);
-  }
-
-  jbyte* card_num_2_ptr(unsigned int card_num) {
-    return (jbyte*) (_ct_bot + card_num);
-  }
-
-  // Returns the count of this card after incrementing it.
-  jbyte* add_card_count(jbyte* card_ptr, int* count, bool* defer);
-
-  // Returns true if this card is in a young region
-  bool is_young_card(jbyte* card_ptr);
-
  public:
-  ConcurrentG1Refine();
+  ConcurrentG1Refine(G1CollectedHeap* g1h);
   ~ConcurrentG1Refine();
 
   void init(); // Accomplish some initialization that has to wait.
@@ -206,34 +81,6 @@
   // Iterate over the conc refine threads
   void threads_do(ThreadClosure *tc);
 
-  // If this is the first entry for the slot, writes into the cache and
-  // returns NULL.  If it causes an eviction, returns the evicted pointer.
-  // Otherwise, its a cache hit, and returns NULL.
-  jbyte* cache_insert(jbyte* card_ptr, bool* defer);
-
-  // Process the cached entries.
-  void clean_up_cache(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
-
-  // Set up for parallel processing of the cards in the hot cache
-  void clear_hot_cache_claimed_index() {
-    _hot_cache_par_claimed_idx = 0;
-  }
-
-  // Discard entries in the hot cache.
-  void clear_hot_cache() {
-    _hot_cache_idx = 0; _n_hot = 0;
-  }
-
-  bool hot_cache_is_empty() { return _n_hot == 0; }
-
-  bool use_cache() { return _use_cache; }
-  void set_use_cache(bool b) {
-    if (b) _use_cache = _def_use_cache;
-    else   _use_cache = false;
-  }
-
-  void clear_and_record_card_counts();
-
   static int thread_num();
 
   void print_worker_threads_on(outputStream* st) const;
@@ -250,6 +97,8 @@
   int worker_thread_num() const { return _n_worker_threads; }
 
   int thread_threshold_step() const { return _thread_threshold_step; }
+
+  G1HotCardCache* hot_card_cache() { return &_hot_card_cache; }
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CardCounts.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
+#include "memory/cardTableModRefBS.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/copy.hpp"
+
+void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
+  if (has_count_table()) {
+    check_card_num(from_card_num,
+                   err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
+    assert(from_card_num < to_card_num,
+           err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
+                   from_card_num, to_card_num));
+    assert(to_card_num <= _committed_max_card_num,
+           err_msg("to card num out of range: "
+                   "to: "SIZE_FORMAT ", "
+                   "max: "SIZE_FORMAT,
+                   to_card_num, _committed_max_card_num));
+
+    to_card_num = MIN2(_committed_max_card_num, to_card_num);
+
+    Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
+  }
+}
+
+G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
+  _g1h(g1h), _card_counts(NULL),
+  _reserved_max_card_num(0), _committed_max_card_num(0),
+  _committed_size(0) {}
+
+void G1CardCounts::initialize() {
+  assert(_g1h->max_capacity() > 0, "initialization order");
+  assert(_g1h->capacity() == 0, "initialization order");
+
+  if (G1ConcRSHotCardLimit > 0) {
+    // The max value we can store in the counts table is
+    // max_jubyte. Guarantee the value of the hot
+    // threshold limit is no more than this.
+    guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
+
+    ModRefBarrierSet* bs = _g1h->mr_bs();
+    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
+    _ct_bs = (CardTableModRefBS*)bs;
+    _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
+
+    // Allocate/Reserve the counts table
+    size_t reserved_bytes = _g1h->max_capacity();
+    _reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift;
+
+    size_t reserved_size = _reserved_max_card_num * sizeof(jbyte);
+    ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size));
+    if (!rs.is_reserved()) {
+      warning("Could not reserve enough space for the card counts table");
+      guarantee(!has_reserved_count_table(), "should be NULL");
+      return;
+    }
+
+    MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+
+    _card_counts_storage.initialize(rs, 0);
+    _card_counts = (jubyte*) _card_counts_storage.low();
+  }
+}
+
+void G1CardCounts::resize(size_t heap_capacity) {
+  // Expand the card counts table to handle a heap with the given capacity.
+
+  if (!has_reserved_count_table()) {
+    // Don't expand if we failed to reserve the card counts table.
+    return;
+  }
+
+  assert(_committed_size ==
+         ReservedSpace::allocation_align_size_up(_committed_size),
+         err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));
+
+  // Verify that the committed space for the card counts
+  // matches our committed max card num.
+  size_t prev_committed_size = _committed_size;
+  size_t prev_committed_card_num = prev_committed_size / sizeof(jbyte);
+  assert(prev_committed_card_num == _committed_max_card_num,
+         err_msg("Card mismatch: "
+                 "prev: " SIZE_FORMAT ", "
+                 "committed: "SIZE_FORMAT,
+                 prev_committed_card_num, _committed_max_card_num));
+
+  size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
+  size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
+  size_t new_committed_card_num =
+                MIN2(_reserved_max_card_num, new_committed_size / sizeof(jbyte));
+
+  if (_committed_max_card_num < new_committed_card_num) {
+    // we need to expand the backing store for the card counts
+    size_t expand_size = new_committed_size - prev_committed_size;
+
+    if (!_card_counts_storage.expand_by(expand_size)) {
+      warning("Card counts table backing store commit failure");
+      return;
+    }
+    assert(_card_counts_storage.committed_size() == new_committed_size,
+           "expansion commit failure");
+
+    _committed_size = new_committed_size;
+    _committed_max_card_num = new_committed_card_num;
+
+    clear_range(prev_committed_card_num, _committed_max_card_num);
+  }
+}
+
+uint G1CardCounts::add_card_count(jbyte* card_ptr) {
+  // Returns the number of times the card has been refined.
+  // If we failed to reserve/commit the counts table, return 0.
+  // If card_ptr is beyond the committed end of the counts table,
+  // return 0.
+  // Otherwise return the actual count.
+  // Unless G1ConcRSHotCardLimit has been set appropriately,
+  // returning 0 will result in the card being considered
+  // cold and refined immediately.
+  uint count = 0;
+  if (has_count_table()) {
+    size_t card_num = ptr_2_card_num(card_ptr);
+    if (card_num < _committed_max_card_num) {
+      count = (uint) _card_counts[card_num];
+      if (count < G1ConcRSHotCardLimit) {
+        _card_counts[card_num] += 1;
+      }
+      assert(_card_counts[card_num] <= G1ConcRSHotCardLimit,
+             err_msg("Refinement count overflow? "
+                     "new count: "UINT32_FORMAT,
+                     (uint) _card_counts[card_num]));
+    }
+  }
+  return count;
+}
+
+bool G1CardCounts::is_hot(uint count) {
+  return (count >= G1ConcRSHotCardLimit);
+}
+
+void G1CardCounts::clear_region(HeapRegion* hr) {
+  assert(!hr->isHumongous(), "Should have been cleared");
+  if (has_count_table()) {
+    HeapWord* bottom = hr->bottom();
+
+    // We use the last address in hr, since hr could be the
+    // last region in the heap, in which case trying to find
+    // the card for hr->end() would be an out-of-bounds access
+    // to the card table.
+    HeapWord* last = hr->end() - 1;
+    assert(_g1h->g1_committed().contains(last),
+           err_msg("last not in committed: "
+                   "last: " PTR_FORMAT ", "
+                   "committed: [" PTR_FORMAT ", " PTR_FORMAT ")",
+                   last,
+                   _g1h->g1_committed().start(),
+                   _g1h->g1_committed().end()));
+
+    const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom);
+    const jbyte* last_card_ptr = _ct_bs->byte_for_const(last);
+
+#ifdef ASSERT
+    HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
+    assert(start_addr == hr->bottom(), "alignment");
+    HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
+    assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment");
+#endif // ASSERT
+
+    // Clear the counts for the (exclusive) card range.
+    size_t from_card_num = ptr_2_card_num(from_card_ptr);
+    size_t to_card_num = ptr_2_card_num(last_card_ptr) + 1;
+    clear_range(from_card_num, to_card_num);
+  }
+}
+
+void G1CardCounts::clear_all() {
+  assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
+  clear_range((size_t)0, _committed_max_card_num);
+}
+
+G1CardCounts::~G1CardCounts() {
+  if (has_reserved_count_table()) {
+    _card_counts_storage.release();
+  }
+}
+
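
G1CardCounts::add_card_count() above keeps one unsigned byte per card, returns the
pre-increment value, and stops incrementing once the counter reaches
G1ConcRSHotCardLimit so it can never overflow. Below is a minimal standalone sketch
of that saturating-counter behaviour in plain C++; CardCountsSketch, its members,
and the limit of 4 (mirroring the default G1ConcRSHotCardLimit) are illustrative,
not HotSpot code.

#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative stand-in for the per-card refinement counters: one byte per
// card, saturating at the hot-card limit so the counter can never overflow.
class CardCountsSketch {
  std::vector<uint8_t> counts_;
  unsigned hot_limit_;
 public:
  CardCountsSketch(size_t num_cards, unsigned hot_limit)
    : counts_(num_cards, 0), hot_limit_(hot_limit) {}

  // Returns the count *before* the increment, like G1CardCounts::add_card_count().
  unsigned add_card_count(size_t card_num) {
    unsigned count = counts_[card_num];
    if (count < hot_limit_) {
      counts_[card_num] = static_cast<uint8_t>(count + 1);
    }
    return count;
  }

  bool is_hot(unsigned count) const { return count >= hot_limit_; }
};

int main() {
  CardCountsSketch counts(1024, 4);   // 1024 cards, hot once refined 4 times
  for (int i = 0; i < 6; i++) {
    unsigned c = counts.add_card_count(42);
    std::printf("refinement %d: previous count %u, hot=%d\n", i + 1, c, counts.is_hot(c));
  }
  return 0;
}
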
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/virtualspace.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class CardTableModRefBS;
+class G1CollectedHeap;
+class HeapRegion;
+
+// Table to track the number of times a card has been refined. Once
+// a card has been refined a certain number of times, it is
+// considered 'hot' and its refinement is delayed by inserting the
+// card into the hot card cache. The card will then be refined when
+// it is evicted from the hot card cache, or when the hot card cache
+// is 'drained' during the next evacuation pause.
+
+class G1CardCounts: public CHeapObj<mtGC> {
+  G1CollectedHeap* _g1h;
+
+  // The table of counts
+  jubyte* _card_counts;
+
+  // Max capacity of the reserved space for the counts table
+  size_t _reserved_max_card_num;
+
+  // Max capacity of the committed space for the counts table
+  size_t _committed_max_card_num;
+
+  // Size of committed space for the counts table
+  size_t _committed_size;
+
+  // CardTable bottom.
+  const jbyte* _ct_bot;
+
+  // Barrier set
+  CardTableModRefBS* _ct_bs;
+
+  // The virtual memory backing the counts table
+  VirtualSpace _card_counts_storage;
+
+  // Returns true if the card counts table has been reserved.
+  bool has_reserved_count_table() { return _card_counts != NULL; }
+
+  // Returns true if the card counts table has been reserved and committed.
+  bool has_count_table() {
+    return has_reserved_count_table() && _committed_max_card_num > 0;
+  }
+
+  void check_card_num(size_t card_num, const char* msg) {
+    assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
+  }
+
+  size_t ptr_2_card_num(const jbyte* card_ptr) {
+    assert(card_ptr >= _ct_bot,
+           err_msg("Inavalied card pointer: "
+                   "card_ptr: " PTR_FORMAT ", "
+                   "_ct_bot: " PTR_FORMAT,
+                   card_ptr, _ct_bot));
+    size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
+    check_card_num(card_num,
+                   err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
+    return card_num;
+  }
+
+  jbyte* card_num_2_ptr(size_t card_num) {
+    check_card_num(card_num,
+                   err_msg("card num out of range: "SIZE_FORMAT, card_num));
+    return (jbyte*) (_ct_bot + card_num);
+  }
+
+  // Clear the counts table for the given (exclusive) index range.
+  void clear_range(size_t from_card_num, size_t to_card_num);
+
+ public:
+  G1CardCounts(G1CollectedHeap* g1h);
+  ~G1CardCounts();
+
+  void initialize();
+
+  // Resize the committed space for the card counts table in
+  // response to a resize of the committed space for the heap.
+  void resize(size_t heap_capacity);
+
+  // Increments the refinement count for the given card.
+  // Returns the pre-increment count value.
+  uint add_card_count(jbyte* card_ptr);
+
+  // Returns true if the given count is high enough to be considered
+  // 'hot'; false otherwise.
+  bool is_hot(uint count);
+
+  // Clears the card counts for the cards spanned by the region
+  void clear_region(HeapRegion* hr);
+
+  // Clear the entire card counts table during GC.
+  // Updates the policy stats with the duration.
+  void clear_all();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
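
The ptr_2_card_num()/card_num_2_ptr() helpers above are plain offsets from the
bottom of the card table, which in turn covers the heap at one byte per
2^card_shift heap bytes. A worked standalone sketch of that arithmetic, assuming
the usual 512-byte G1 card (card_shift == 9); the base addresses are made up
purely for the example.

#include <cstdint>
#include <cstdio>

int main() {
  const unsigned card_shift = 9;             // assumed: 512-byte cards

  const uint64_t heap_base = 0x700000000ULL; // pretend heap bottom
  const uint64_t ct_bot    = 0x10000000ULL;  // pretend byte_for(heap_base)

  uint64_t addr = heap_base + 5 * 4096 + 100;   // some address in the heap

  // Heap address -> card number -> card table entry, mirroring byte_for()
  // followed by card_num_2_ptr(); ptr_2_card_num() is the inverse offset.
  uint64_t card_num = (addr - heap_base) >> card_shift;
  uint64_t card_ptr = ct_bot + card_num;

  std::printf("offset %llu bytes -> card %llu -> card table byte at 0x%llx\n",
              (unsigned long long)(addr - heap_base),
              (unsigned long long)card_num,
              (unsigned long long)card_ptr);
  return 0;
}
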
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -96,7 +96,7 @@
     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
   {}
   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
-    bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
+    bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
     // This path is executed by the concurrent refine or mutator threads,
     // concurrently, and so we do not care if card_ptr contains references
     // that point into the collection set.
@@ -1452,9 +1452,10 @@
         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
       }
 
-      if (_cg1r->use_cache()) {
-        _cg1r->clear_and_record_card_counts();
-        _cg1r->clear_hot_cache();
+      G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+      if (hot_card_cache->use_cache()) {
+        hot_card_cache->reset_card_counts();
+        hot_card_cache->reset_hot_cache();
       }
 
       // Rebuild remembered sets of all regions.
@@ -1767,6 +1768,8 @@
   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
   // Tell the BOT about the update.
   _bot_shared->resize(_g1_committed.word_size());
+  // Tell the hot card cache about the update
+  _cg1r->hot_card_cache()->resize_card_counts(capacity());
 }
 
 bool G1CollectedHeap::expand(size_t expand_bytes) {
@@ -1843,33 +1846,32 @@
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                          HeapRegion::GrainBytes);
-  uint num_regions_deleted = 0;
-  MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
+  uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
+
+  uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
   HeapWord* old_end = (HeapWord*) _g1_storage.high();
-  assert(mr.end() == old_end, "post-condition");
+  size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
   ergo_verbose3(ErgoHeapSizing,
                 "shrink the heap",
                 ergo_format_byte("requested shrinking amount")
                 ergo_format_byte("aligned shrinking amount")
                 ergo_format_byte("attempted shrinking amount"),
-                shrink_bytes, aligned_shrink_bytes, mr.byte_size());
-  if (mr.byte_size() > 0) {
+                shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
+  if (num_regions_removed > 0) {
+    _g1_storage.shrink_by(shrunk_bytes);
+    HeapWord* new_end = (HeapWord*) _g1_storage.high();
+
     if (_hr_printer.is_active()) {
-      HeapWord* curr = mr.end();
-      while (curr > mr.start()) {
+      HeapWord* curr = old_end;
+      while (curr > new_end) {
         HeapWord* curr_end = curr;
         curr -= HeapRegion::GrainWords;
         _hr_printer.uncommit(curr, curr_end);
       }
-      assert(curr == mr.start(), "post-condition");
     }
 
-    _g1_storage.shrink_by(mr.byte_size());
-    HeapWord* new_end = (HeapWord*) _g1_storage.high();
-    assert(mr.start() == new_end, "post-condition");
-
-    _expansion_regions += num_regions_deleted;
+    _expansion_regions += num_regions_removed;
     update_committed_space(old_end, new_end);
     HeapRegionRemSet::shrink_heap(n_regions());
     g1_policy()->record_new_heap_size(n_regions());
@@ -2000,7 +2002,7 @@
   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
 
-  _cg1r = new ConcurrentG1Refine();
+  _cg1r = new ConcurrentG1Refine(this);
 
   // Reserve the maximum.
 
@@ -2061,6 +2063,9 @@
                   (HeapWord*) _g1_reserved.end(),
                   _expansion_regions);
 
+  // Do later initialization work for concurrent refinement.
+  _cg1r->init();
+
   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.
   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
@@ -2078,20 +2083,20 @@
 
   _g1h = this;
 
-   _in_cset_fast_test_length = max_regions();
-   _in_cset_fast_test_base =
+  _in_cset_fast_test_length = max_regions();
+  _in_cset_fast_test_base =
                    NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
 
-   // We're biasing _in_cset_fast_test to avoid subtracting the
-   // beginning of the heap every time we want to index; basically
-   // it's the same with what we do with the card table.
-   _in_cset_fast_test = _in_cset_fast_test_base -
+  // We're biasing _in_cset_fast_test to avoid subtracting the
+  // beginning of the heap every time we want to index; basically
+  // it's the same with what we do with the card table.
+  _in_cset_fast_test = _in_cset_fast_test_base -
                ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
 
-   // Clear the _cset_fast_test bitmap in anticipation of adding
-   // regions to the incremental collection set for the first
-   // evacuation pause.
-   clear_cset_fast_test();
+  // Clear the _cset_fast_test bitmap in anticipation of adding
+  // regions to the incremental collection set for the first
+  // evacuation pause.
+  clear_cset_fast_test();
 
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
@@ -2153,9 +2158,6 @@
   // counts and that mechanism.
   SpecializationStats::clear();
 
-  // Do later initialization work for concurrent refinement.
-  _cg1r->init();
-
   // Here we allocate the dummy full region that is required by the
   // G1AllocRegion class. If we don't pass an address in the reserved
   // space here, lots of asserts fire.
@@ -2314,7 +2316,8 @@
                                                  bool concurrent,
                                                  int worker_i) {
   // Clean cards in the hot card cache
-  concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
+  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+  hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
 
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   int n_completed_buffers = 0;
@@ -5604,8 +5607,11 @@
   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
 
   g1_rem_set()->prepare_for_oops_into_collection_set_do();
-  concurrent_g1_refine()->set_use_cache(false);
-  concurrent_g1_refine()->clear_hot_cache_claimed_index();
+
+  // Disable the hot card cache.
+  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+  hot_card_cache->reset_hot_cache_claimed_index();
+  hot_card_cache->set_use_cache(false);
 
   uint n_workers;
   if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -5687,8 +5693,11 @@
   release_gc_alloc_regions(n_workers);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
-  concurrent_g1_refine()->clear_hot_cache();
-  concurrent_g1_refine()->set_use_cache(true);
+  // Reset and re-enable the hot card cache.
+  // Note the counts for the cards in the regions in the
+  // collection set are reset when the collection set is freed.
+  hot_card_cache->reset_hot_cache();
+  hot_card_cache->set_use_cache(true);
 
   finalize_for_evac_failure();
 
@@ -5750,6 +5759,12 @@
   assert(!hr->is_empty(), "the region should not be empty");
   assert(free_list != NULL, "pre-condition");
 
+  // Clear the card counts for this region.
+  // Note: we only need to do this if the region is not young
+  // (since we don't refine cards in young regions).
+  if (!hr->is_young()) {
+    _cg1r->hot_card_cache()->reset_card_counts(hr);
+  }
   *pre_used += hr->used();
   hr->hr_clear(par, true /* clear_space */);
   free_list->add_as_head(hr);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -309,7 +309,8 @@
 
 void G1CollectorPolicy::initialize_flags() {
   set_min_alignment(HeapRegion::GrainBytes);
-  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
+  size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
+  set_max_alignment(MAX2(card_table_alignment, min_alignment()));
   if (SurvivorRatio < 1) {
     vm_exit_during_initialization("Invalid survivor ratio specified");
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -155,11 +155,6 @@
 
 G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
   _max_gc_threads(max_gc_threads),
-  _min_clear_cc_time_ms(-1.0),
-  _max_clear_cc_time_ms(-1.0),
-  _cur_clear_cc_time_ms(0.0),
-  _cum_clear_cc_time_ms(0.0),
-  _num_cc_clears(0L),
   _last_gc_worker_start_times_ms(_max_gc_threads, "%.1lf", false),
   _last_ext_root_scan_times_ms(_max_gc_threads, "%.1lf"),
   _last_satb_filtering_times_ms(_max_gc_threads, "%.1lf"),
@@ -212,11 +207,11 @@
     _last_gc_worker_times_ms.set(i, worker_time);
 
     double worker_known_time = _last_ext_root_scan_times_ms.get(i) +
-      _last_satb_filtering_times_ms.get(i) +
-      _last_update_rs_times_ms.get(i) +
-      _last_scan_rs_times_ms.get(i) +
-      _last_obj_copy_times_ms.get(i) +
-      _last_termination_times_ms.get(i);
+                               _last_satb_filtering_times_ms.get(i) +
+                               _last_update_rs_times_ms.get(i) +
+                               _last_scan_rs_times_ms.get(i) +
+                               _last_obj_copy_times_ms.get(i) +
+                               _last_termination_times_ms.get(i);
 
     double worker_other_time = worker_time - worker_known_time;
     _last_gc_worker_other_times_ms.set(i, worker_other_time);
@@ -285,15 +280,6 @@
   }
   print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
   print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
-  if (Verbose && G1Log::finest()) {
-    print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
-    print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
-    print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
-    print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
-    if (_num_cc_clears > 0) {
-      print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
-    }
-  }
   double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
   print_stats(1, "Other", misc_time_ms);
   if (_cur_verify_before_time_ms > 0.0) {
@@ -311,19 +297,3 @@
     print_stats(2, "Verify After", _cur_verify_after_time_ms);
   }
 }
-
-void G1GCPhaseTimes::record_cc_clear_time_ms(double ms) {
-  if (!(Verbose && G1Log::finest())) {
-    return;
-  }
-
-  if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms) {
-    _min_clear_cc_time_ms = ms;
-  }
-  if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms) {
-    _max_clear_cc_time_ms = ms;
-  }
-  _cur_clear_cc_time_ms = ms;
-  _cum_clear_cc_time_ms += ms;
-  _num_cc_clears++;
-}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,13 +133,6 @@
   double _cur_ref_proc_time_ms;
   double _cur_ref_enq_time_ms;
 
-  // Card Table Count Cache stats
-  double _min_clear_cc_time_ms;         // min
-  double _max_clear_cc_time_ms;         // max
-  double _cur_clear_cc_time_ms;         // clearing time during current pause
-  double _cum_clear_cc_time_ms;         // cummulative clearing time
-  jlong  _num_cc_clears;                // number of times the card count cache has been cleared
-
   double _cur_collection_start_sec;
   double _root_region_scan_wait_time_ms;
 
@@ -227,8 +220,6 @@
     _root_region_scan_wait_time_ms = time_ms;
   }
 
-  void record_cc_clear_time_ms(double ms);
-
   void record_young_free_cset_time_ms(double time_ms) {
     _recorded_young_free_cset_time_ms = time_ms;
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/dirtyCardQueue.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1HotCardCache.hpp"
+#include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "runtime/atomic.hpp"
+
+G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
+  _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
+
+void G1HotCardCache::initialize() {
+  if (default_use_cache()) {
+    _use_cache = true;
+
+    _hot_cache_size = (1 << G1ConcRSLogCacheSize);
+    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
+
+    _n_hot = 0;
+    _hot_cache_idx = 0;
+
+    // For refining the cards in the hot cache in parallel
+    int n_workers = (ParallelGCThreads > 0 ?
+                        _g1h->workers()->total_workers() : 1);
+    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
+    _hot_cache_par_claimed_idx = 0;
+
+    _card_counts.initialize();
+  }
+}
+
+G1HotCardCache::~G1HotCardCache() {
+  if (default_use_cache()) {
+    assert(_hot_cache != NULL, "Logic");
+    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
+  }
+}
+
+jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
+  uint count = _card_counts.add_card_count(card_ptr);
+  if (!_card_counts.is_hot(count)) {
+    // The card is not hot so do not store it in the cache;
+    // return it for immediate refining.
+    return card_ptr;
+  }
+
+  // Otherwise, the card is hot.
+  jbyte* res = NULL;
+  MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
+  if (_n_hot == _hot_cache_size) {
+    res = _hot_cache[_hot_cache_idx];
+    _n_hot--;
+  }
+
+  // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
+  _hot_cache[_hot_cache_idx] = card_ptr;
+  _hot_cache_idx++;
+
+  if (_hot_cache_idx == _hot_cache_size) {
+    // Wrap around
+    _hot_cache_idx = 0;
+  }
+  _n_hot++;
+
+  return res;
+}
+
+void G1HotCardCache::drain(int worker_i,
+                           G1RemSet* g1rs,
+                           DirtyCardQueue* into_cset_dcq) {
+  if (!default_use_cache()) {
+    assert(_hot_cache == NULL, "Logic");
+    return;
+  }
+
+  assert(_hot_cache != NULL, "Logic");
+  assert(!use_cache(), "cache should be disabled");
+  int start_idx;
+
+  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
+    int end_idx = start_idx + _hot_cache_par_chunk_size;
+
+    if (start_idx ==
+        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
+      // The current worker has successfully claimed the chunk [start_idx..end_idx)
+      end_idx = MIN2(end_idx, _n_hot);
+      for (int i = start_idx; i < end_idx; i++) {
+        jbyte* card_ptr = _hot_cache[i];
+        if (card_ptr != NULL) {
+          if (g1rs->refine_card(card_ptr, worker_i, true)) {
+            // The part of the heap spanned by the card contains references
+            // that point into the current collection set.
+            // We need to record the card pointer in the DirtyCardQueueSet
+            // that we use for such cards.
+            //
+            // The only time we care about recording cards that contain
+            // references that point into the collection set is during
+            // RSet updating while within an evacuation pause.
+            // In this case worker_i should be the id of a GC worker thread
+            assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
+            assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
+                   err_msg("incorrect worker id: "INT32_FORMAT, worker_i));
+
+            into_cset_dcq->enqueue(card_ptr);
+          }
+        }
+      }
+    }
+  }
+  // The existing entries in the hot card cache, which were just refined
+  // above, are discarded prior to re-enabling the cache near the end of the GC.
+}
+
+void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
+  _card_counts.resize(heap_capacity);
+}
+
+void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
+  _card_counts.clear_region(hr);
+}
+
+void G1HotCardCache::reset_card_counts() {
+  _card_counts.clear_all();
+}
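
G1HotCardCache::drain() above lets several workers empty the cache in parallel by
claiming fixed-size chunks with a compare-and-swap on _hot_cache_par_claimed_idx;
only the thread whose CAS succeeds processes [start_idx, end_idx). Below is a
standalone sketch of that claiming protocol using std::atomic; the names, the
chunk size, and the "process an entry" body are illustrative, not the HotSpot API.

#include <atomic>
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

// Shared claim index: each worker CASes it forward by one chunk and, on
// success, owns the claimed range. Losers simply retry with the new value.
static std::atomic<int> claimed_idx{0};

void drain_worker(const std::vector<int>& cache, int chunk,
                  std::atomic<long>& processed) {
  const int n = static_cast<int>(cache.size());
  int start;
  while ((start = claimed_idx.load()) < n) {      // read the claim index once
    int end = start + chunk;
    if (claimed_idx.compare_exchange_strong(start, end)) {
      // This worker successfully claimed the chunk [start, min(end, n)).
      if (end > n) end = n;
      for (int i = start; i < end; i++) {
        processed.fetch_add(cache[i]);            // stand-in for refining a card
      }
    }
  }
}

int main() {
  std::vector<int> cache(1000, 1);                // pretend hot cache entries
  std::atomic<long> processed{0};
  std::vector<std::thread> workers;
  for (int w = 0; w < 4; w++) {
    workers.emplace_back(drain_worker, std::cref(cache), 64, std::ref(processed));
  }
  for (auto& t : workers) t.join();
  std::printf("processed %ld entries (expected %zu)\n", processed.load(), cache.size());
  return 0;
}
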
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
+
+#include "gc_implementation/g1/g1_globals.hpp"
+#include "gc_implementation/g1/g1CardCounts.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class DirtyCardQueue;
+class G1CollectedHeap;
+class G1RemSet;
+class HeapRegion;
+
+// An evicting cache of cards that have been logged by the G1 post
+// write barrier. Placing a card in the cache delays the refinement
+// of the card until the card is evicted, or the cache is drained
+// during the next evacuation pause.
+//
+// The first thing the G1 post write barrier does is to check whether
+// the card containing the updated pointer is already dirty and, if
+// so, skip the remaining code in the barrier.
+//
+// Delaying the refinement of a card will make the card fail the
+// first is_dirty check in the write barrier, skipping the remainder
+// of the write barrier.
+//
+// This can significantly reduce the overhead of the write barrier
+// code, increasing throughput.
+
+class G1HotCardCache: public CHeapObj<mtGC> {
+  G1CollectedHeap*   _g1h;
+
+  // The card cache table
+  jbyte**      _hot_cache;
+
+  int          _hot_cache_size;
+  int          _n_hot;
+  int          _hot_cache_idx;
+
+  int          _hot_cache_par_chunk_size;
+  volatile int _hot_cache_par_claimed_idx;
+
+  bool         _use_cache;
+
+  G1CardCounts _card_counts;
+
+  bool default_use_cache() const {
+    return (G1ConcRSLogCacheSize > 0);
+  }
+
+ public:
+  G1HotCardCache(G1CollectedHeap* g1h);
+  ~G1HotCardCache();
+
+  void initialize();
+
+  bool use_cache() { return _use_cache; }
+
+  void set_use_cache(bool b) {
+    _use_cache = (b ? default_use_cache() : false);
+  }
+
+  // Returns the card to be refined or NULL.
+  //
+  // Increments the count for the given card. If the card is not 'hot',
+  // it is returned for immediate refining. Otherwise the card is
+  // added to the hot card cache.
+  // If there is enough room in the hot card cache for the card we're
+  // adding, NULL is returned and no further action is needed.
+  // If we evict a card from the cache to make room for the new card,
+  // the evicted card is then returned for refinement.
+  jbyte* insert(jbyte* card_ptr);
+
+  // Refine the cards that have been delayed as a result of
+  // being in the cache.
+  void drain(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
+
+  // Set up for parallel processing of the cards in the hot cache
+  void reset_hot_cache_claimed_index() {
+    _hot_cache_par_claimed_idx = 0;
+  }
+
+  // Resets the hot card cache and discards the entries.
+  void reset_hot_cache() {
+    assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
+    assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
+    _hot_cache_idx = 0; _n_hot = 0;
+  }
+
+  bool hot_cache_is_empty() { return _n_hot == 0; }
+
+  // Resizes the card counts table to match the given capacity
+  void resize_card_counts(size_t heap_capacity);
+
+  // Zeros the values in the card counts table for the entire committed heap
+  void reset_card_counts();
+
+  // Zeros the values in the card counts table for the given region
+  void reset_card_counts(HeapRegion* hr);
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
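
The insert() contract documented above boils down to a fixed-size evicting ring
buffer: a hot card either fits (NULL is returned, nothing to refine now) or it
displaces the oldest cached card, which is handed back for immediate refinement.
A small single-threaded sketch of that behaviour in plain C++ (the real cache
takes HotCardCache_lock around this; HotCacheSketch and its string "cards" are
illustrative only).

#include <cstdio>
#include <vector>

// Fixed-size evicting cache: inserting into a full cache overwrites the oldest
// slot and returns the evicted entry; otherwise the insert is absorbed and
// nullptr is returned, meaning the caller has nothing to refine right now.
class HotCacheSketch {
  std::vector<const char*> slots_;
  int size_;
  int idx_;     // next slot to write
  int n_hot_;   // current number of live entries
 public:
  explicit HotCacheSketch(int size)
    : slots_(size, nullptr), size_(size), idx_(0), n_hot_(0) {}

  const char* insert(const char* card) {
    const char* evicted = nullptr;
    if (n_hot_ == size_) {
      evicted = slots_[idx_];        // full: evict the slot we are about to reuse
      n_hot_--;
    }
    slots_[idx_] = card;
    if (++idx_ == size_) idx_ = 0;   // wrap around
    n_hot_++;
    return evicted;
  }
};

int main() {
  HotCacheSketch cache(2);
  const char* cards[] = { "card A", "card B", "card C" };
  for (const char* c : cards) {
    const char* evicted = cache.insert(c);
    std::printf("insert %s -> evicted %s\n", c, evicted ? evicted : "(none)");
  }
  return 0;
}
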
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1HotCardCache.hpp"
 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
@@ -247,7 +248,7 @@
     assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
     assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
 
-    if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
+    if (_g1rs->refine_card(card_ptr, worker_i, true)) {
       // 'card_ptr' contains references that point into the collection
       // set. We need to record the card in the DCQS
       // (G1CollectedHeap::into_cset_dirty_card_queue_set())
@@ -288,9 +289,6 @@
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
 #endif
-  if (worker_i == 0) {
-    _cg1r->clear_and_record_card_counts();
-  }
 
   // We cache the value of 'oc' closure into the appropriate slot in the
   // _cset_rs_update_cl for this worker
@@ -396,7 +394,7 @@
     //   RSet updating,
     // * the post-write barrier shouldn't be logging updates to young
     //   regions (but there is a situation where this can happen - see
-    //   the comment in G1RemSet::concurrentRefineOneCard below -
+    //   the comment in G1RemSet::refine_card() below -
     //   that should not be applicable here), and
     // * during actual RSet updating, the filtering of cards in young
     //   regions in HeapRegion::oops_on_card_seq_iterate_careful is
@@ -502,8 +500,6 @@
                                        claim_val);
 }
 
-
-
 G1TriggerClosure::G1TriggerClosure() :
   _triggered(false) { }
 
@@ -524,13 +520,91 @@
   _record_refs_into_cset(record_refs_into_cset),
   _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }
 
-bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
-                                                   bool check_for_refs_into_cset) {
+// Returns true if the given card contains references that point
+// into the collection set, if we're checking for such references;
+// false otherwise.
+
+bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
+                           bool check_for_refs_into_cset) {
+
+  // If the card is no longer dirty, nothing to do.
+  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
+    // No need to return that this card contains refs that point
+    // into the collection set.
+    return false;
+  }
+
   // Construct the region representing the card.
   HeapWord* start = _ct_bs->addr_for(card_ptr);
   // And find the region containing it.
   HeapRegion* r = _g1->heap_region_containing(start);
-  assert(r != NULL, "unexpected null");
+  if (r == NULL) {
+    // Again no need to return that this card contains refs that
+    // point into the collection set.
+    return false;  // Not in the G1 heap (might be in perm, for example.)
+  }
+
+  // Why do we have to check here whether a card is on a young region,
+  // given that we dirty young regions and, as a result, the
+  // post-barrier is supposed to filter them out and never to enqueue
+  // them? When we allocate a new region as the "allocation region" we
+  // actually dirty its cards after we release the lock, since card
+  // dirtying while holding the lock was a performance bottleneck. So,
+  // as a result, it is possible for other threads to actually
+  // allocate objects in the region (after they acquire the lock)
+  // before all the cards on the region are dirtied. This is unlikely,
+  // and it doesn't happen often, but it can happen. So, the extra
+  // check below filters out those cards.
+  if (r->is_young()) {
+    return false;
+  }
+
+  // While we are processing RSet buffers during the collection, we
+  // actually don't want to scan any cards on the collection set,
+  // since we don't want to update remembered sets with entries that
+  // point into the collection set, given that live objects from the
+  // collection set are about to move and such entries will be stale
+  // very soon. This change also deals with a reliability issue which
+  // involves scanning a card in the collection set and coming across
+  // an array that was being chunked and looking malformed. Note,
+  // however, that if evacuation fails, we have to scan any objects
+  // that were not moved and create any missing entries.
+  if (r->in_collection_set()) {
+    return false;
+  }
+
+  // The result from the hot card cache insert call is either:
+  //   * pointer to the current card
+  //     (implying that the current card is not 'hot'),
+  //   * null
+  //     (meaning we had inserted the card ptr into the "hot" card cache,
+  //     which had some headroom),
+  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
+  //
+
+  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+  if (hot_card_cache->use_cache()) {
+    assert(!check_for_refs_into_cset, "sanity");
+    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");
+
+    card_ptr = hot_card_cache->insert(card_ptr);
+    if (card_ptr == NULL) {
+      // There was no eviction. Nothing to do.
+      return false;
+    }
+
+    start = _ct_bs->addr_for(card_ptr);
+    r = _g1->heap_region_containing(start);
+    if (r == NULL) {
+      // Not in the G1 heap
+      return false;
+    }
+
+    // Checking whether the region we got back from the cache
+    // is young here is inappropriate. The region could have been
+    // freed, reallocated and tagged as young while in the cache.
+    // Hence we could see its young type change at any time.
+  }
 
   // Don't use addr_for(card_ptr + 1) which can ask for
   // a card beyond the heap.  This is not safe without a perm
@@ -610,140 +684,17 @@
     _conc_refine_cards++;
   }
 
-  return trigger_cl.triggered();
-}
-
-bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
-                                              bool check_for_refs_into_cset) {
-  // If the card is no longer dirty, nothing to do.
-  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
-    // No need to return that this card contains refs that point
-    // into the collection set.
-    return false;
-  }
-
-  // Construct the region representing the card.
-  HeapWord* start = _ct_bs->addr_for(card_ptr);
-  // And find the region containing it.
-  HeapRegion* r = _g1->heap_region_containing(start);
-  if (r == NULL) {
-    // Again no need to return that this card contains refs that
-    // point into the collection set.
-    return false;  // Not in the G1 heap (might be in perm, for example.)
-  }
-  // Why do we have to check here whether a card is on a young region,
-  // given that we dirty young regions and, as a result, the
-  // post-barrier is supposed to filter them out and never to enqueue
-  // them? When we allocate a new region as the "allocation region" we
-  // actually dirty its cards after we release the lock, since card
-  // dirtying while holding the lock was a performance bottleneck. So,
-  // as a result, it is possible for other threads to actually
-  // allocate objects in the region (after the acquire the lock)
-  // before all the cards on the region are dirtied. This is unlikely,
-  // and it doesn't happen often, but it can happen. So, the extra
-  // check below filters out those cards.
-  if (r->is_young()) {
-    return false;
-  }
-  // While we are processing RSet buffers during the collection, we
-  // actually don't want to scan any cards on the collection set,
-  // since we don't want to update remebered sets with entries that
-  // point into the collection set, given that live objects from the
-  // collection set are about to move and such entries will be stale
-  // very soon. This change also deals with a reliability issue which
-  // involves scanning a card in the collection set and coming across
-  // an array that was being chunked and looking malformed. Note,
-  // however, that if evacuation fails, we have to scan any objects
-  // that were not moved and create any missing entries.
-  if (r->in_collection_set()) {
-    return false;
-  }
+  // This gets set to true if the card being refined has
+  // references that point into the collection set.
+  bool has_refs_into_cset = trigger_cl.triggered();
 
-  // Should we defer processing the card?
-  //
-  // Previously the result from the insert_cache call would be
-  // either card_ptr (implying that card_ptr was currently "cold"),
-  // null (meaning we had inserted the card ptr into the "hot"
-  // cache, which had some headroom), or a "hot" card ptr
-  // extracted from the "hot" cache.
-  //
-  // Now that the _card_counts cache in the ConcurrentG1Refine
-  // instance is an evicting hash table, the result we get back
-  // could be from evicting the card ptr in an already occupied
-  // bucket (in which case we have replaced the card ptr in the
-  // bucket with card_ptr and "defer" is set to false). To avoid
-  // having a data structure (updates to which would need a lock)
-  // to hold these unprocessed dirty cards, we need to immediately
-  // process card_ptr. The actions needed to be taken on return
-  // from cache_insert are summarized in the following table:
-  //
-  // res      defer   action
-  // --------------------------------------------------------------
-  // null     false   card evicted from _card_counts & replaced with
-  //                  card_ptr; evicted ptr added to hot cache.
-  //                  No need to process res; immediately process card_ptr
-  //
-  // null     true    card not evicted from _card_counts; card_ptr added
-  //                  to hot cache.
-  //                  Nothing to do.
-  //
-  // non-null false   card evicted from _card_counts & replaced with
-  //                  card_ptr; evicted ptr is currently "cold" or
-  //                  caused an eviction from the hot cache.
-  //                  Immediately process res; process card_ptr.
-  //
-  // non-null true    card not evicted from _card_counts; card_ptr is
-  //                  currently cold, or caused an eviction from hot
-  //                  cache.
-  //                  Immediately process res; no need to process card_ptr.
-
-
-  jbyte* res = card_ptr;
-  bool defer = false;
+  // We should only be detecting that the card contains references
+  // that point into the collection set if the current thread is
+  // a GC worker thread.
+  assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(),
+           "invalid result at non safepoint");
 
-  // This gets set to true if the card being refined has references
-  // that point into the collection set.
-  bool oops_into_cset = false;
-
-  if (_cg1r->use_cache()) {
-    jbyte* res = _cg1r->cache_insert(card_ptr, &defer);
-    if (res != NULL && (res != card_ptr || defer)) {
-      start = _ct_bs->addr_for(res);
-      r = _g1->heap_region_containing(start);
-      if (r != NULL) {
-        // Checking whether the region we got back from the cache
-        // is young here is inappropriate. The region could have been
-        // freed, reallocated and tagged as young while in the cache.
-        // Hence we could see its young type change at any time.
-        //
-        // Process card pointer we get back from the hot card cache. This
-        // will check whether the region containing the card is young
-        // _after_ checking that the region has been allocated from.
-        oops_into_cset = concurrentRefineOneCard_impl(res, worker_i,
-                                                      false /* check_for_refs_into_cset */);
-        // The above call to concurrentRefineOneCard_impl is only
-        // performed if the hot card cache is enabled. This cache is
-        // disabled during an evacuation pause - which is the only
-        // time when we need know if the card contains references
-        // that point into the collection set. Also when the hot card
-        // cache is enabled, this code is executed by the concurrent
-        // refine threads - rather than the GC worker threads - and
-        // concurrentRefineOneCard_impl will return false.
-        assert(!oops_into_cset, "should not see true here");
-      }
-    }
-  }
-
-  if (!defer) {
-    oops_into_cset =
-      concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset);
-    // We should only be detecting that the card contains references
-    // that point into the collection set if the current thread is
-    // a GC worker thread.
-    assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(),
-           "invalid result at non safepoint");
-  }
-  return oops_into_cset;
+  return has_refs_into_cset;
 }
 
 class HRRSStatsIter: public HeapRegionClosure {
@@ -846,13 +797,16 @@
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       dcqs.concatenate_logs();
     }
-    bool cg1r_use_cache = _cg1r->use_cache();
-    _cg1r->set_use_cache(false);
+
+    G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+    bool use_hot_card_cache = hot_card_cache->use_cache();
+    hot_card_cache->set_use_cache(false);
+
     DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
     updateRS(&into_cset_dcq, 0);
     _g1->into_cset_dirty_card_queue_set().clear();
-    _cg1r->set_use_cache(cg1r_use_cache);
 
+    hot_card_cache->set_use_cache(use_hot_card_cache);
     assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
   }
 }
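
After the rewrite above, refine_card() is a single early-return pipeline: it bails
out if the card is no longer dirty, lies outside the G1 heap, covers a young
region, or covers a collection-set region, and only then consults the hot card
cache and scans whichever card the cache hands back. The condensed sketch below
shows just that filtering order; the Card struct and should_scan_card() are
hypothetical stand-ins, and the real method additionally reports whether the scan
found references into the collection set.

#include <cstdio>

// Hypothetical card model, only for the sketch; none of these fields exist in
// HotSpot. Each flag corresponds to one of the early returns in refine_card().
struct Card {
  bool dirty;
  bool in_heap;
  bool young;
  bool in_cset;
};

// Mirrors the order of the cheap filters: anything filtered out here is not
// worth scanning (and not worth inserting into the hot card cache either).
bool should_scan_card(const Card& c) {
  if (!c.dirty)   return false;   // already refined or cleaned
  if (!c.in_heap) return false;   // not covered by the G1 heap
  if (c.young)    return false;   // young regions are not refined
  if (c.in_cset)  return false;   // collection-set entries would go stale anyway
  // The real code would now consult the hot card cache and possibly defer.
  return true;
}

int main() {
  Card old_dirty    = { true, true, false, false };
  Card young_region = { true, true, true,  false };
  std::printf("dirty old-region card scanned: %d\n", should_scan_card(old_dirty));
  std::printf("young-region card scanned:     %d\n", should_scan_card(young_region));
  return 0;
}
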
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,14 +66,6 @@
   // references into the collection set.
   OopsInHeapRegionClosure** _cset_rs_update_cl;
 
-  // The routine that performs the actual work of refining a dirty
-  // card.
-  // If check_for_refs_into_refs is true then a true result is returned
-  // if the card contains oops that have references into the current
-  // collection set.
-  bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
-                                    bool check_for_refs_into_cset);
-
 public:
   // This is called to reset dual hash tables after the gc pause
   // is finished and the initial hash table is no longer being
@@ -90,8 +82,7 @@
   // function can be helpful in partitioning the work to be done. It
   // should be the same as the "i" passed to the calling thread's
   // work(i) function. In the sequential case this param will be ingored.
-  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                   int worker_i);
+  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, int worker_i);
 
   // Prepare for and cleanup after an oops_into_collection_set_do
   // call.  Must call each of these once before and after (in sequential
@@ -124,14 +115,13 @@
   void scrub_par(BitMap* region_bm, BitMap* card_bm,
                  uint worker_num, int claim_val);
 
-  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
-  // join and leave around parts that must be atomic wrt GC.  (NULL means
-  // being done at a safepoint.)
+  // Refine the card corresponding to "card_ptr".
   // If check_for_refs_into_cset is true, a true result is returned
   // if the given card contains oops that have references into the
   // current collection set.
-  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
-                                       bool check_for_refs_into_cset);
+  virtual bool refine_card(jbyte* card_ptr,
+                           int worker_i,
+                           bool check_for_refs_into_cset);
 
   // Print any relevant summary info.
   virtual void print_summary_info();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -163,16 +163,12 @@
           "Select green, yellow and red zones adaptively to meet the "      \
           "the pause requirements.")                                        \
                                                                             \
-  develop(intx, G1ConcRSLogCacheSize, 10,                                   \
+  product(uintx, G1ConcRSLogCacheSize, 10,                                  \
           "Log base 2 of the length of conc RS hot-card cache.")            \
                                                                             \
-  develop(intx, G1ConcRSHotCardLimit, 4,                                    \
+  product(uintx, G1ConcRSHotCardLimit, 4,                                   \
           "The threshold that defines (>=) a hot card.")                    \
                                                                             \
-  develop(intx, G1MaxHotCardCountSizePercent, 25,                           \
-          "The maximum size of the hot card count cache as a "              \
-          "percentage of the number of cards for the maximum heap.")        \
-                                                                            \
   develop(bool, G1PrintOopAppls, false,                                     \
           "When true, print applications of closures to external locs.")    \
                                                                             \
@@ -247,10 +243,6 @@
           "If non-0 is the number of parallel rem set update threads, "     \
           "otherwise the value is determined ergonomically.")               \
                                                                             \
-  develop(intx, G1CardCountCacheExpandThreshold, 16,                        \
-          "Expand the card count cache if the number of collisions for "    \
-          "a particular entry exceeds this value.")                         \
-                                                                            \
   develop(bool, G1VerifyCTCleanup, false,                                   \
           "Verify card table cleanup.")                                     \
                                                                             \
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -124,11 +124,11 @@
       }
       assert(_regions[index] == NULL, "invariant");
       _regions[index] = new_hr;
-      increment_length(&_allocated_length);
+      increment_allocated_length();
     }
     // Have to increment the length first, otherwise we will get an
     // assert failure at(index) below.
-    increment_length(&_length);
+    increment_length();
     HeapRegion* hr = at(index);
     list->add_as_tail(hr);
 
@@ -201,45 +201,29 @@
   }
 }
 
-MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
-                                   uint* num_regions_deleted) {
+uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
   // Reset this in case it's currently pointing into the regions that
   // we just removed.
   _next_search_index = 0;
 
-  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
-  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
   assert(length() > 0, "the region sequence should not be empty");
   assert(length() <= _allocated_length, "invariant");
   assert(_allocated_length > 0, "we should have at least one region committed");
+  assert(num_regions_to_remove < length(), "We should never remove all regions");
 
-  // around the loop, i will be the next region to be removed
-  uint i = length() - 1;
-  assert(i > 0, "we should never remove all regions");
-  // [last_start, end) is the MemRegion that covers the regions we will remove.
-  HeapWord* end = at(i)->end();
-  HeapWord* last_start = end;
-  *num_regions_deleted = 0;
-  while (shrink_bytes > 0) {
-    HeapRegion* cur = at(i);
-    // We should leave the humongous regions where they are.
-    if (cur->isHumongous()) break;
-    // We should stop shrinking if we come across a non-empty region.
-    if (!cur->is_empty()) break;
+  uint i = 0;
+  for (; i < num_regions_to_remove; i++) {
+    HeapRegion* cur = at(length() - 1);
 
-    i -= 1;
-    *num_regions_deleted += 1;
-    shrink_bytes -= cur->capacity();
-    last_start = cur->bottom();
-    decrement_length(&_length);
-    // We will reclaim the HeapRegion. _allocated_length should be
-    // covering this index. So, even though we removed the region from
-    // the active set by decreasing _length, we still have it
-    // available in the future if we need to re-use it.
-    assert(i > 0, "we should never remove all regions");
-    assert(length() > 0, "we should never remove all regions");
+    if (!cur->is_empty()) {
+      // We have to give up if the region cannot be removed
+      break;
   }
-  return MemRegion(last_start, end);
+    assert(!cur->isHumongous(), "Humongous regions should not be empty");
+
+    decrement_length();
+  }
+  return i;
 }
 
 #ifndef PRODUCT
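
For illustration, a minimal standalone sketch of the new shrink_by() contract: walk the
committed suffix, uncommit at most the requested number of regions, stop at the first
non-empty one, and report how many were actually removed. This is a toy model (a plain
std::vector instead of the real HeapRegionSeq); callers are assumed to convert a byte
amount into a region count, e.g. shrink_bytes / HeapRegion::GrainBytes, before calling.

#include <cstdio>
#include <vector>

// Toy model of the new contract: uncommit at most num_to_remove regions from
// the committed suffix, stopping at the first non-empty one.
static unsigned shrink_by(std::vector<bool>& region_is_empty, unsigned num_to_remove) {
  unsigned removed = 0;
  while (removed < num_to_remove && !region_is_empty.empty() && region_is_empty.back()) {
    region_is_empty.pop_back();   // models decrement_length()
    removed++;
  }
  return removed;
}

int main() {
  // Committed regions, oldest first; only the last two are empty.
  std::vector<bool> regions = {false, false, true, true};
  unsigned removed = shrink_by(regions, 3);   // asks for 3, can only get 2
  printf("removed %u regions, %zu remain committed\n", removed, regions.size());
  return 0;
}

Returning a count instead of a MemRegion leaves the address arithmetic to the caller,
which matches the simplified declaration in heapRegionSeq.hpp below.
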
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -92,14 +92,19 @@
   // address is valid.
   inline uintx addr_to_index_biased(HeapWord* addr) const;
 
-  void increment_length(uint* length) {
-    assert(*length < _max_length, "pre-condition");
-    *length += 1;
+  void increment_allocated_length() {
+    assert(_allocated_length < _max_length, "pre-condition");
+    _allocated_length++;
   }
 
-  void decrement_length(uint* length) {
-    assert(*length > 0, "pre-condition");
-    *length -= 1;
+  void increment_length() {
+    assert(_length < _max_length, "pre-condition");
+    _length++;
+  }
+
+  void decrement_length() {
+    assert(_length > 0, "pre-condition");
+    _length--;
   }
 
  public:
@@ -153,11 +158,9 @@
   void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;
 
   // Tag as uncommitted as many regions that are completely free as
-  // possible, up to shrink_bytes, from the suffix of the committed
-  // sequence. Return a MemRegion that corresponds to the address
-  // range of the uncommitted regions. Assume shrink_bytes is page and
-  // heap region aligned.
-  MemRegion shrink_by(size_t shrink_bytes, uint* num_regions_deleted);
+  // possible, up to num_regions_to_remove, from the suffix of the committed
+  // sequence. Return the actual number of removed regions.
+  uint shrink_by(uint num_regions_to_remove);
 
   // Do some sanity checking.
   void verify_optional() PRODUCT_RETURN;
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -32,6 +32,7 @@
 #include "interpreter/interpreterRuntime.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/methodCounters.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
@@ -304,11 +305,12 @@
 
 
 #define METHOD istate->method()
-#define INVOCATION_COUNT METHOD->invocation_counter()
-#define BACKEDGE_COUNT METHOD->backedge_counter()
-
-
-#define INCR_INVOCATION_COUNT INVOCATION_COUNT->increment()
+#define GET_METHOD_COUNTERS(res)    \
+  res = METHOD->method_counters();  \
+  if (res == NULL) {                \
+    CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
+  }
+
 #define OSR_REQUEST(res, branch_pc) \
             CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
 /*
@@ -325,10 +327,12 @@
 
 #define DO_BACKEDGE_CHECKS(skip, branch_pc)                                                         \
     if ((skip) <= 0) {                                                                              \
+      MethodCounters* mcs;                                                                          \
+      GET_METHOD_COUNTERS(mcs);                                                                     \
       if (UseLoopCounter) {                                                                         \
         bool do_OSR = UseOnStackReplacement;                                                        \
-        BACKEDGE_COUNT->increment();                                                                \
-        if (do_OSR) do_OSR = BACKEDGE_COUNT->reached_InvocationLimit();                             \
+        mcs->backedge_counter()->increment();                                                       \
+        if (do_OSR) do_OSR = mcs->backedge_counter()->reached_InvocationLimit();                    \
         if (do_OSR) {                                                                               \
           nmethod*  osr_nmethod;                                                                    \
           OSR_REQUEST(osr_nmethod, branch_pc);                                                      \
@@ -341,7 +345,7 @@
           }                                                                                         \
         }                                                                                           \
       }  /* UseCompiler ... */                                                                      \
-      INCR_INVOCATION_COUNT;                                                                        \
+      mcs->invocation_counter()->increment();                                                       \
       SAFEPOINT;                                                                                    \
     }
 
@@ -618,11 +622,13 @@
       // count invocations
       assert(initialized, "Interpreter not initialized");
       if (_compiling) {
+        MethodCounters* mcs;
+        GET_METHOD_COUNTERS(mcs);
         if (ProfileInterpreter) {
-          METHOD->increment_interpreter_invocation_count();
+          METHOD->increment_interpreter_invocation_count(THREAD);
         }
-        INCR_INVOCATION_COUNT;
-        if (INVOCATION_COUNT->reached_InvocationLimit()) {
+        mcs->invocation_counter()->increment();
+        if (mcs->invocation_counter()->reached_InvocationLimit()) {
             CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
 
             // We no longer retry on a counter overflow
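
The GET_METHOD_COUNTERS macro introduced above replaces the per-Method invocation and
backedge counters with a lazily allocated MethodCounters object. A minimal sketch of the
same lazy-allocation pattern, using toy stand-in types rather than the real
Method/InterpreterRuntime classes:

#include <cstdio>

struct MethodCounters {
  int invocation_count = 0;
  int backedge_count   = 0;
};

struct Method {
  MethodCounters* _counters = nullptr;
  MethodCounters* method_counters() { return _counters; }
  MethodCounters* build_method_counters() {   // models InterpreterRuntime::build_method_counters()
    if (_counters == nullptr) {
      _counters = new MethodCounters();
    }
    return _counters;
  }
};

int main() {
  Method m;
  // Same shape as GET_METHOD_COUNTERS: fetch, and allocate only on first use.
  MethodCounters* mcs = m.method_counters();
  if (mcs == nullptr) {
    mcs = m.build_method_counters();
  }
  mcs->invocation_count++;
  printf("invocations=%d\n", mcs->invocation_count);
  delete m._counters;
  return 0;
}

In the interpreter the allocation goes through CALL_VM so that a pending exception is
routed to handle_exception instead of leaving mcs as NULL.
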
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -132,7 +132,7 @@
   int alignment = os::vm_allocation_granularity();
   _size = align_size_up(_size, alignment);
 
-  _addr = os::reserve_memory(_size, NULL, alignment);
+  _addr = os::reserve_memory(_size, NULL, alignment, F);
   if (_addr == NULL) {
     vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
   }
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -48,6 +48,17 @@
 // CollectorPolicy methods.
 
 void CollectorPolicy::initialize_flags() {
+  assert(max_alignment() >= min_alignment(),
+      err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
+          max_alignment(), min_alignment()));
+  assert(max_alignment() % min_alignment() == 0,
+      err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
+          max_alignment(), min_alignment()));
+
+  if (MaxHeapSize < InitialHeapSize) {
+    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
+  }
+
   if (MetaspaceSize > MaxMetaspaceSize) {
     MaxMetaspaceSize = MetaspaceSize;
   }
@@ -71,21 +82,9 @@
 }
 
 void CollectorPolicy::initialize_size_info() {
-  // User inputs from -mx and ms are aligned
-  set_initial_heap_byte_size(InitialHeapSize);
-  if (initial_heap_byte_size() == 0) {
-    set_initial_heap_byte_size(NewSize + OldSize);
-  }
-  set_initial_heap_byte_size(align_size_up(_initial_heap_byte_size,
-                                           min_alignment()));
-
-  set_min_heap_byte_size(Arguments::min_heap_size());
-  if (min_heap_byte_size() == 0) {
-    set_min_heap_byte_size(NewSize + OldSize);
-  }
-  set_min_heap_byte_size(align_size_up(_min_heap_byte_size,
-                                       min_alignment()));
-
+  // User inputs from -mx and ms must be aligned
+  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
+  set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
   set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));
 
   // Check heap parameter properties
@@ -201,9 +200,6 @@
   // All sizes must be multiples of the generation granularity.
   set_min_alignment((uintx) Generation::GenGrain);
   set_max_alignment(compute_max_alignment());
-  assert(max_alignment() >= min_alignment() &&
-         max_alignment() % min_alignment() == 0,
-         "invalid alignment constraints");
 
   CollectorPolicy::initialize_flags();
 
@@ -233,9 +229,6 @@
   GenCollectorPolicy::initialize_flags();
 
   OldSize = align_size_down(OldSize, min_alignment());
-  if (NewSize + OldSize > MaxHeapSize) {
-    MaxHeapSize = NewSize + OldSize;
-  }
 
   if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
     // NewRatio will be used later to set the young generation size so we use
@@ -250,6 +243,27 @@
   }
   MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
 
+  // adjust max heap size if necessary
+  if (NewSize + OldSize > MaxHeapSize) {
+    if (FLAG_IS_CMDLINE(MaxHeapSize)) {
+      // somebody set a maximum heap size with the intention that we should not
+      // exceed it. Adjust New/OldSize as necessary.
+      uintx calculated_size = NewSize + OldSize;
+      double shrink_factor = (double) MaxHeapSize / calculated_size;
+      // align
+      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
+      // OldSize is already aligned because above we aligned MaxHeapSize to
+      // max_alignment(), and we just made sure that NewSize is aligned to
+      // min_alignment(). In initialize_flags() we verified that max_alignment()
+      // is a multiple of min_alignment().
+      OldSize = MaxHeapSize - NewSize;
+    } else {
+      MaxHeapSize = NewSize + OldSize;
+    }
+  }
+  // MaxHeapSize may have been changed above, so re-align it
+  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
+
   always_do_update_barrier = UseConcMarkSweepGC;
 
   // Check validity of heap flags
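
A self-contained sketch of the New/OldSize adjustment above, with illustrative sizes and
simple helpers standing in for HotSpot's align_size_up()/align_size_down() and flag
machinery (the alignment values are assumptions, chosen only for the example):

#include <cstddef>
#include <cstdio>

static size_t align_down(size_t size, size_t alignment) { return size - (size % alignment); }
static size_t align_up(size_t size, size_t alignment)   { return align_down(size + alignment - 1, alignment); }

int main() {
  const size_t min_alignment = 64 * 1024;               // assumed generation granularity
  const size_t max_alignment = 2 * 1024 * 1024;         // assumed, a multiple of min_alignment
  size_t NewSize     =  512u * 1024 * 1024;
  size_t OldSize     = 1536u * 1024 * 1024;
  size_t MaxHeapSize = 1024u * 1024 * 1024;             // set explicitly on the command line

  MaxHeapSize = align_up(MaxHeapSize, max_alignment);
  if (NewSize + OldSize > MaxHeapSize) {
    // Same idea as the change above: honor the explicit maximum by scaling
    // NewSize down and deriving OldSize from the remainder.
    double shrink_factor = (double) MaxHeapSize / (NewSize + OldSize);
    NewSize = align_down((size_t)(NewSize * shrink_factor), min_alignment);
    OldSize = MaxHeapSize - NewSize;
  }
  MaxHeapSize = align_up(MaxHeapSize, max_alignment);   // re-aligned, as in the patch
  printf("NewSize=%zu OldSize=%zu MaxHeapSize=%zu\n", NewSize, OldSize, MaxHeapSize);
  return 0;
}

With these numbers the generations (2 GB combined) exceed the 1 GB command-line maximum,
so NewSize is scaled by 0.5 and OldSize becomes the remainder, keeping the explicitly
requested MaxHeapSize intact.
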
--- a/hotspot/src/share/vm/oops/method.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/oops/method.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -67,7 +67,7 @@
 // | ConstMethod*                   (oop)                 |
 // |------------------------------------------------------|
 // | methodData                     (oop)                 |
-// | interp_invocation_count                              |
+// | methodCounters                                       |
 // |------------------------------------------------------|
 // | access_flags                                         |
 // | vtable_index                                         |
@@ -76,16 +76,6 @@
 // |------------------------------------------------------|
 // | method_size             |   intrinsic_id|   flags    |
 // |------------------------------------------------------|
-// | throwout_count          |   num_breakpoints          |
-// |------------------------------------------------------|
-// | invocation_counter                                   |
-// | backedge_counter                                     |
-// |------------------------------------------------------|
-// |           prev_time (tiered only, 64 bit wide)       |
-// |                                                      |
-// |------------------------------------------------------|
-// |                  rate (tiered)                       |
-// |------------------------------------------------------|
 // | code                           (pointer)             |
 // | i2i                            (pointer)             |
 // | adapter                        (pointer)             |
--- a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -341,6 +341,44 @@
   memcpy(writeable_address(length), annos->adr_at(0), length);
 }
 
+//  BootstrapMethods_attribute {
+//    u2 attribute_name_index;
+//    u4 attribute_length;
+//    u2 num_bootstrap_methods;
+//    {   u2 bootstrap_method_ref;
+//        u2 num_bootstrap_arguments;
+//        u2 bootstrap_arguments[num_bootstrap_arguments];
+//    } bootstrap_methods[num_bootstrap_methods];
+//  }
+void JvmtiClassFileReconstituter::write_bootstrapmethod_attribute() {
+  Array<u2>* operands = cpool()->operands();
+  write_attribute_name_index("BootstrapMethods");
+  int num_bootstrap_methods = ConstantPool::operand_array_length(operands);
+
+  // calculate length of attribute
+  int length = sizeof(u2); // num_bootstrap_methods
+  for (int n = 0; n < num_bootstrap_methods; n++) {
+    u2 num_bootstrap_arguments = cpool()->operand_argument_count_at(n);
+    length += sizeof(u2); // bootstrap_method_ref
+    length += sizeof(u2); // num_bootstrap_arguments
+    length += sizeof(u2) * num_bootstrap_arguments; // bootstrap_arguments[num_bootstrap_arguments]
+  }
+  write_u4(length);
+
+  // write attribute
+  write_u2(num_bootstrap_methods);
+  for (int n = 0; n < num_bootstrap_methods; n++) {
+    u2 bootstrap_method_ref = cpool()->operand_bootstrap_method_ref_index_at(n);
+    u2 num_bootstrap_arguments = cpool()->operand_argument_count_at(n);
+    write_u2(bootstrap_method_ref);
+    write_u2(num_bootstrap_arguments);
+    for (int arg = 0; arg < num_bootstrap_arguments; arg++) {
+      u2 bootstrap_argument = cpool()->operand_argument_index_at(n, arg);
+      write_u2(bootstrap_argument);
+    }
+  }
+}
+
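
As a worked check of the length computation in write_bootstrapmethod_attribute() above
(illustrative numbers, not taken from the patch): with two bootstrap methods taking one
and three static arguments respectively, attribute_length = 2 (num_bootstrap_methods)
+ (2 + 2 + 1*2) + (2 + 2 + 3*2) = 2 + 6 + 10 = 18 bytes, which is the value passed to
write_u4(length) before the per-method records are written.
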
 
 // Write InnerClasses attribute
 // JVMSpec|   InnerClasses_attribute {
@@ -513,6 +551,11 @@
   AnnotationArray* param_anno = method->parameter_annotations();
   AnnotationArray* default_anno = method->annotation_default();
 
+  // skip generated default interface methods
+  if (method->is_overpass()) {
+    return;
+  }
+
   write_u2(access_flags.get_flags() & JVM_RECOGNIZED_METHOD_MODIFIERS);
   write_u2(const_method->name_index());
   write_u2(const_method->signature_index());
@@ -592,6 +635,9 @@
   if (anno != NULL) {
     ++attr_count;     // has RuntimeVisibleAnnotations attribute
   }
+  if (cpool()->operands() != NULL) {
+    ++attr_count;
+  }
 
   write_u2(attr_count);
 
@@ -610,6 +656,9 @@
   if (anno != NULL) {
     write_annotations_attribute("RuntimeVisibleAnnotations", anno);
   }
+  if (cpool()->operands() != NULL) {
+    write_bootstrapmethod_attribute();
+  }
 }
 
 // Write the method information portion of ClassFile structure
@@ -619,8 +668,19 @@
   HandleMark hm(thread());
   Array<Method*>* methods = ikh()->methods();
   int num_methods = methods->length();
+  int num_overpass = 0;
 
-  write_u2(num_methods);
+  // count the generated default interface methods
+  // these will not be re-created by write_method_info
+  // and should not be included in the total count
+  for (int index = 0; index < num_methods; index++) {
+    Method* method = methods->at(index);
+    if (method->is_overpass()) {
+      num_overpass++;
+    }
+  }
+
+  write_u2(num_methods - num_overpass);
   if (JvmtiExport::can_maintain_original_method_order()) {
     int index;
     int original_index;
--- a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -127,6 +127,7 @@
   void write_signature_attribute(u2 generic_signaure_index);
   void write_attribute_name_index(const char* name);
   void write_annotations_attribute(const char* attr_name, AnnotationArray* annos);
+  void write_bootstrapmethod_attribute();
 
   address writeable_address(size_t size);
   void write_u1(u1 x);
--- a/hotspot/src/share/vm/prims/perf.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/prims/perf.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -142,20 +142,20 @@
   }
 
   switch(variability) {
-  case 1:  /* V_Constant */
+  case PerfData::V_Constant:
     pl = PerfDataManager::create_long_constant(NULL_NS, (char *)name_utf,
                                                (PerfData::Units)units, value,
                                                CHECK_NULL);
     break;
 
-  case 2:  /* V_Variable */
-    pl = PerfDataManager::create_long_variable(NULL_NS, (char *)name_utf,
+  case PerfData::V_Monotonic:
+    pl = PerfDataManager::create_long_counter(NULL_NS, (char *)name_utf,
                                                (PerfData::Units)units, value,
                                                CHECK_NULL);
     break;
 
-  case 3:  /* V_Monotonic Counter */
-    pl = PerfDataManager::create_long_counter(NULL_NS, (char *)name_utf,
+  case PerfData::V_Variable:
+    pl = PerfDataManager::create_long_variable(NULL_NS, (char *)name_utf,
                                               (PerfData::Units)units, value,
                                               CHECK_NULL);
     break;
--- a/hotspot/src/share/vm/prims/unsafe.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/prims/unsafe.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -315,10 +315,7 @@
   OrderAccess::fence();
 UNSAFE_END
 
-#if defined(SPARC) || defined(X86)
-// Sparc and X86 have atomic jlong (8 bytes) instructions
-
-#else
+#ifndef SUPPORTS_NATIVE_CX8
 // Keep old code for platforms which may not have atomic jlong (8 bytes) instructions
 
 // Volatile long versions must use locks if !VM_Version::supports_cx8().
@@ -356,7 +353,7 @@
   }
 UNSAFE_END
 
-#endif // not SPARC and not X86
+#endif // not SUPPORTS_NATIVE_CX8
 
 #define DEFINE_GETSETOOP(jboolean, Boolean) \
  \
@@ -420,8 +417,7 @@
 DEFINE_GETSETOOP_VOLATILE(jfloat, Float);
 DEFINE_GETSETOOP_VOLATILE(jdouble, Double);
 
-#if defined(SPARC) || defined(X86)
-// Sparc and X86 have atomic jlong (8 bytes) instructions
+#ifdef SUPPORTS_NATIVE_CX8
 DEFINE_GETSETOOP_VOLATILE(jlong, Long);
 #endif
 
@@ -450,8 +446,7 @@
 
 UNSAFE_ENTRY(void, Unsafe_SetOrderedLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x))
   UnsafeWrapper("Unsafe_SetOrderedLong");
-#if defined(SPARC) || defined(X86)
-  // Sparc and X86 have atomic jlong (8 bytes) instructions
+#ifdef SUPPORTS_NATIVE_CX8
   SET_FIELD_VOLATILE(obj, offset, jlong, x);
 #else
   // Keep old code for platforms which may not have atomic long (8 bytes) instructions
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -93,6 +93,15 @@
   return closure.found();
 WB_END
 
+WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
+  CollectorPolicy * p = Universe::heap()->collector_policy();
+  gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
+    SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Min alignment "SIZE_FORMAT" Max alignment "SIZE_FORMAT,
+    p->min_heap_byte_size(), p->initial_heap_byte_size(), p->max_heap_byte_size(),
+    p->min_alignment(), p->max_alignment());
+}
+WB_END
+
 #if INCLUDE_ALL_GCS
 WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
@@ -386,6 +395,7 @@
       CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
       (void*) &WB_ParseCommandLine
   },
+  {CC"printHeapSizes",     CC"()V",                   (void*)&WB_PrintHeapSizes    },
 #if INCLUDE_ALL_GCS
   {CC"g1InConcurrentMark", CC"()Z",                   (void*)&WB_G1InConcurrentMark},
   {CC"g1IsHumongous",      CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous     },
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -747,16 +747,16 @@
     return;
   }
 
-  int index = *count;
+  int new_count = *count + 1;
 
   // expand the array and add arg to the last element
-  (*count)++;
   if (*bldarray == NULL) {
-    *bldarray = NEW_C_HEAP_ARRAY(char*, *count, mtInternal);
+    *bldarray = NEW_C_HEAP_ARRAY(char*, new_count, mtInternal);
   } else {
-    *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, *count, mtInternal);
+    *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, new_count, mtInternal);
   }
-  (*bldarray)[index] = strdup(arg);
+  (*bldarray)[*count] = strdup(arg);
+  *count = new_count;
 }
 
 void Arguments::build_jvm_args(const char* arg) {
@@ -1617,30 +1617,38 @@
     FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx)reasonable_max);
   }
 
-  // If the initial_heap_size has not been set with InitialHeapSize
-  // or -Xms, then set it as fraction of the size of physical memory,
-  // respecting the maximum and minimum sizes of the heap.
-  if (FLAG_IS_DEFAULT(InitialHeapSize)) {
+  // If the minimum or initial heap_size have not been set or requested to be set
+  // ergonomically, set them accordingly.
+  if (InitialHeapSize == 0 || min_heap_size() == 0) {
     julong reasonable_minimum = (julong)(OldSize + NewSize);
 
     reasonable_minimum = MIN2(reasonable_minimum, (julong)MaxHeapSize);
 
     reasonable_minimum = limit_by_allocatable_memory(reasonable_minimum);
 
-    julong reasonable_initial = phys_mem / InitialRAMFraction;
-
-    reasonable_initial = MAX2(reasonable_initial, reasonable_minimum);
-    reasonable_initial = MIN2(reasonable_initial, (julong)MaxHeapSize);
-
-    reasonable_initial = limit_by_allocatable_memory(reasonable_initial);
-
-    if (PrintGCDetails && Verbose) {
-      // Cannot use gclog_or_tty yet.
-      tty->print_cr("  Initial heap size " SIZE_FORMAT, (uintx)reasonable_initial);
-      tty->print_cr("  Minimum heap size " SIZE_FORMAT, (uintx)reasonable_minimum);
+    if (InitialHeapSize == 0) {
+      julong reasonable_initial = phys_mem / InitialRAMFraction;
+
+      reasonable_initial = MAX3(reasonable_initial, reasonable_minimum, (julong)min_heap_size());
+      reasonable_initial = MIN2(reasonable_initial, (julong)MaxHeapSize);
+
+      reasonable_initial = limit_by_allocatable_memory(reasonable_initial);
+
+      if (PrintGCDetails && Verbose) {
+        // Cannot use gclog_or_tty yet.
+        tty->print_cr("  Initial heap size " SIZE_FORMAT, (uintx)reasonable_initial);
+      }
+      FLAG_SET_ERGO(uintx, InitialHeapSize, (uintx)reasonable_initial);
     }
-    FLAG_SET_ERGO(uintx, InitialHeapSize, (uintx)reasonable_initial);
-    set_min_heap_size((uintx)reasonable_minimum);
+    // If the minimum heap size has not been set (via -Xms),
+    // synchronize with InitialHeapSize to avoid errors with the default value.
+    if (min_heap_size() == 0) {
+      set_min_heap_size(MIN2((uintx)reasonable_minimum, InitialHeapSize));
+      if (PrintGCDetails && Verbose) {
+        // Cannot use gclog_or_tty yet.
+        tty->print_cr("  Minimum heap size " SIZE_FORMAT, min_heap_size());
+      }
+    }
   }
 }
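
A standalone sketch of the ergonomic sizing above, with illustrative values; the default
of 64 for InitialRAMFraction and the omission of limit_by_allocatable_memory() are
assumptions made only to keep the example short:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t phys_mem        = 8ULL * 1024 * 1024 * 1024;  // 8 GB of physical memory
  uint64_t MaxHeapSize     = 2ULL * 1024 * 1024 * 1024;  // from -Xmx or ergonomics
  uint64_t NewSize         = 16ULL * 1024 * 1024;
  uint64_t OldSize         = 48ULL * 1024 * 1024;
  uint64_t InitialHeapSize = 0;                           // not set on the command line
  uint64_t min_heap_size   = 0;                           // no -Xms either

  uint64_t reasonable_minimum = std::min(NewSize + OldSize, MaxHeapSize);
  if (InitialHeapSize == 0) {
    uint64_t reasonable_initial = phys_mem / 64;          // phys_mem / InitialRAMFraction
    reasonable_initial = std::max(reasonable_initial, reasonable_minimum);
    reasonable_initial = std::min(reasonable_initial, MaxHeapSize);
    InitialHeapSize = reasonable_initial;                 // 128 MB here
  }
  if (min_heap_size == 0) {
    // The derived minimum never exceeds InitialHeapSize.
    min_heap_size = std::min(reasonable_minimum, InitialHeapSize);
  }
  printf("initial=%llu MB  min=%llu MB\n",
         (unsigned long long)(InitialHeapSize >> 20),
         (unsigned long long)(min_heap_size  >> 20));
  return 0;
}
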
 
@@ -2043,6 +2051,10 @@
                                         "G1RefProcDrainInterval");
     status = status && verify_min_value((intx)G1ConcMarkStepDurationMillis, 1,
                                         "G1ConcMarkStepDurationMillis");
+    status = status && verify_interval(G1ConcRSHotCardLimit, 0, max_jubyte,
+                                       "G1ConcRSHotCardLimit");
+    status = status && verify_interval(G1ConcRSLogCacheSize, 0, 31,
+                                       "G1ConcRSLogCacheSize");
   }
 #endif // INCLUDE_ALL_GCS
 
@@ -2426,7 +2438,8 @@
     // -Xms
     } else if (match_option(option, "-Xms", &tail)) {
       julong long_initial_heap_size = 0;
-      ArgsRange errcode = parse_memory_size(tail, &long_initial_heap_size, 1);
+      // an initial heap size of 0 means it will be determined ergonomically
+      ArgsRange errcode = parse_memory_size(tail, &long_initial_heap_size, 0);
       if (errcode != arg_in_range) {
         jio_fprintf(defaultStream::error_stream(),
                     "Invalid initial heap size: %s\n", option->optionString);
@@ -2437,7 +2450,7 @@
       // Currently the minimum size and the initial heap sizes are the same.
       set_min_heap_size(InitialHeapSize);
     // -Xmx
-    } else if (match_option(option, "-Xmx", &tail)) {
+    } else if (match_option(option, "-Xmx", &tail) || match_option(option, "-XX:MaxHeapSize=", &tail)) {
       julong long_max_heap_size = 0;
       ArgsRange errcode = parse_memory_size(tail, &long_max_heap_size, 1);
       if (errcode != arg_in_range) {
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -2968,7 +2968,7 @@
                                                                             \
   /* gc parameters */                                                       \
   product(uintx, InitialHeapSize, 0,                                        \
-          "Initial heap size (in bytes); zero means OldSize + NewSize")     \
+          "Initial heap size (in bytes); zero means use ergonomics")        \
                                                                             \
   product(uintx, MaxHeapSize, ScaleForWordSize(96*M),                       \
           "Maximum heap size (in bytes)")                                   \
--- a/hotspot/src/share/vm/runtime/os.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/runtime/os.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1457,6 +1457,18 @@
 
   return result;
 }
+
+char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
+   MEMFLAGS flags) {
+  char* result = pd_reserve_memory(bytes, addr, alignment_hint);
+  if (result != NULL) {
+    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+    MemTracker::record_virtual_memory_type((address)result, flags);
+  }
+
+  return result;
+}
+
 char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
   char* result = pd_attempt_reserve_memory_at(bytes, addr);
   if (result != NULL) {
--- a/hotspot/src/share/vm/runtime/os.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/runtime/os.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -255,6 +255,8 @@
   static int    vm_allocation_granularity();
   static char*  reserve_memory(size_t bytes, char* addr = 0,
                                size_t alignment_hint = 0);
+  static char*  reserve_memory(size_t bytes, char* addr,
+                               size_t alignment_hint, MEMFLAGS flags);
   static char*  reserve_memory_aligned(size_t size, size_t alignment);
   static char*  attempt_reserve_memory_at(size_t bytes, char* addr);
   static void   split_reserved_memory(char *base, size_t size,
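
A toy model of the contract of the new four-argument os::reserve_memory() overload: on
success the reservation and its NMT category are recorded in one step, so callers such
as the templated allocator in allocation.inline.hpp can pass their MEMFLAGS straight
through. The types below are stand-ins, not the real os/MemTracker classes.

#include <cstdio>
#include <cstdlib>

enum MEMFLAGS { mtNone, mtInternal, mtGC };

static char* reserve_memory(size_t bytes, char* /*addr*/, size_t /*alignment_hint*/, MEMFLAGS flags) {
  char* result = (char*) malloc(bytes);          // models pd_reserve_memory()
  if (result != nullptr) {
    // Record both the reservation and its NMT category, as the new overload does.
    printf("reserved %zu bytes at %p, NMT category %d\n", bytes, (void*) result, (int) flags);
  }
  return result;
}

int main() {
  char* base = reserve_memory(4096, nullptr, 0, mtInternal);
  free(base);
  return 0;
}
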
--- a/hotspot/src/share/vm/runtime/serviceThread.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/runtime/serviceThread.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,8 @@
 #include "runtime/mutexLocker.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "services/gcNotifier.hpp"
+#include "services/diagnosticArgument.hpp"
+#include "services/diagnosticFramework.hpp"
 
 ServiceThread* ServiceThread::_instance = NULL;
 
@@ -83,6 +85,7 @@
     bool sensors_changed = false;
     bool has_jvmti_events = false;
     bool has_gc_notification_event = false;
+    bool has_dcmd_notification_event = false;
     JvmtiDeferredEvent jvmti_event;
     {
       // Need state transition ThreadBlockInVM so that this thread
@@ -98,7 +101,8 @@
       MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
       while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) &&
              !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) &&
-              !(has_gc_notification_event = GCNotifier::has_event())) {
+              !(has_gc_notification_event = GCNotifier::has_event()) &&
+              !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification())) {
         // wait until one of the sensors has pending requests, or there is a
         // pending JVMTI event or JMX GC notification to post
         Service_lock->wait(Mutex::_no_safepoint_check_flag);
@@ -120,6 +124,10 @@
     if(has_gc_notification_event) {
         GCNotifier::sendNotification(CHECK);
     }
+
+    if(has_dcmd_notification_event) {
+      DCmdFactory::send_notification(CHECK);
+    }
   }
 }
 
--- a/hotspot/src/share/vm/services/attachListener.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/services/attachListener.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -157,7 +157,7 @@
   Thread* THREAD = Thread::current();
   // All the supplied jcmd arguments are stored as a single
   // string (op->arg(0)). This is parsed by the Dcmd framework.
-  DCmd::parse_and_execute(out, op->arg(0), ' ', THREAD);
+  DCmd::parse_and_execute(DCmd_Source_AttachAPI, out, op->arg(0), ' ', THREAD);
   if (HAS_PENDING_EXCEPTION) {
     java_lang_Throwable::print(PENDING_EXCEPTION, out);
     out->cr();
--- a/hotspot/src/share/vm/services/diagnosticCommand.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/services/diagnosticCommand.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -34,26 +34,33 @@
 
 void DCmdRegistrant::register_dcmds(){
   // Registration of the diagnostic commands
-  // First boolean argument specifies if the command is enabled
-  // Second boolean argument specifies if the command is hidden
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HelpDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VersionDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CommandLineDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintSystemPropertiesDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintVMFlagsDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VMUptimeDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<SystemGCDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RunFinalizationDCmd>(true, false));
+  // First argument specifies which interfaces will export the command
+  // Second argument specifies if the command is enabled
+  // Third  argument specifies if the command is hidden
+  uint32_t full_export = DCmd_Source_Internal | DCmd_Source_AttachAPI
+                         | DCmd_Source_MBean;
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HelpDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VersionDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CommandLineDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintSystemPropertiesDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintVMFlagsDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VMUptimeDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<SystemGCDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RunFinalizationDCmd>(full_export, true, false));
 #if INCLUDE_SERVICES // Heap dumping/inspection supported
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HeapDumpDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassHistogramDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassStatsDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HeapDumpDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassHistogramDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassStatsDCmd>(full_export, true, false));
 #endif // INCLUDE_SERVICES
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(true, false));
-  //Enhanced JMX Agent Support
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStartRemoteDCmd>(true,false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStartLocalDCmd>(true,false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStopRemoteDCmd>(true,false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(full_export, true, false));
+
+  // Enhanced JMX Agent Support
+  // These commands won't be exported via the DiagnosticCommandMBean until an
+  // appropriate permission is created for them
+  uint32_t jmx_agent_export_flags = DCmd_Source_Internal | DCmd_Source_AttachAPI;
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStartRemoteDCmd>(jmx_agent_export_flags, true,false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStartLocalDCmd>(jmx_agent_export_flags, true,false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStopRemoteDCmd>(jmx_agent_export_flags, true,false));
 
 }
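
A minimal standalone sketch of how the new export masks behave; the enum values mirror
DCmdSource in diagnosticFramework.hpp further down, everything else is a toy. A factory
registered with jmx_agent_export_flags stays invisible to the DiagnosticCommandMBean but
remains reachable internally and through the attach API.

#include <cstdint>
#include <cstdio>

enum DCmdSource : uint32_t {
  DCmd_Source_Internal  = 0x01U,
  DCmd_Source_AttachAPI = 0x02U,
  DCmd_Source_MBean     = 0x04U
};

// Same check that DCmdFactory::factory() now performs before returning a factory.
static bool exported_to(uint32_t export_flags, DCmdSource source) {
  return (export_flags & source) != 0;
}

int main() {
  uint32_t full_export      = DCmd_Source_Internal | DCmd_Source_AttachAPI | DCmd_Source_MBean;
  uint32_t jmx_agent_export = DCmd_Source_Internal | DCmd_Source_AttachAPI;  // no MBean bit yet

  printf("HelpDCmd via MBean:             %s\n", exported_to(full_export, DCmd_Source_MBean) ? "yes" : "no");
  printf("JMXStartRemoteDCmd via MBean:   %s\n", exported_to(jmx_agent_export, DCmd_Source_MBean) ? "yes" : "no");
  printf("JMXStartRemoteDCmd via attach:  %s\n", exported_to(jmx_agent_export, DCmd_Source_AttachAPI) ? "yes" : "no");
  return 0;
}
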
 
@@ -72,29 +79,37 @@
   _dcmdparser.add_dcmd_argument(&_cmd);
 };
 
-void HelpDCmd::execute(TRAPS) {
+void HelpDCmd::execute(DCmdSource source, TRAPS) {
   if (_all.value()) {
-    GrowableArray<const char*>* cmd_list = DCmdFactory::DCmd_list();
+    GrowableArray<const char*>* cmd_list = DCmdFactory::DCmd_list(source);
     for (int i = 0; i < cmd_list->length(); i++) {
-      DCmdFactory* factory = DCmdFactory::factory(cmd_list->at(i),
+      DCmdFactory* factory = DCmdFactory::factory(source, cmd_list->at(i),
                                                   strlen(cmd_list->at(i)));
-      if (!factory->is_hidden()) {
-        output()->print_cr("%s%s", factory->name(),
-                           factory->is_enabled() ? "" : " [disabled]");
-        output()->print_cr("\t%s", factory->description());
-        output()->cr();
-      }
+      output()->print_cr("%s%s", factory->name(),
+                         factory->is_enabled() ? "" : " [disabled]");
+      output()->print_cr("\t%s", factory->description());
+      output()->cr();
       factory = factory->next();
     }
   } else if (_cmd.has_value()) {
     DCmd* cmd = NULL;
-    DCmdFactory* factory = DCmdFactory::factory(_cmd.value(),
+    DCmdFactory* factory = DCmdFactory::factory(source, _cmd.value(),
                                                 strlen(_cmd.value()));
     if (factory != NULL) {
       output()->print_cr("%s%s", factory->name(),
                          factory->is_enabled() ? "" : " [disabled]");
       output()->print_cr(factory->description());
       output()->print_cr("\nImpact: %s", factory->impact());
+      JavaPermission p = factory->permission();
+      if(p._class != NULL) {
+        if(p._action != NULL) {
+          output()->print_cr("\nPermission: %s(%s, %s)",
+                  p._class, p._name == NULL ? "null" : p._name, p._action);
+        } else {
+          output()->print_cr("\nPermission: %s(%s)",
+                  p._class, p._name == NULL ? "null" : p._name);
+        }
+      }
       output()->cr();
       cmd = factory->create_resource_instance(output());
       if (cmd != NULL) {
@@ -106,14 +121,12 @@
     }
   } else {
     output()->print_cr("The following commands are available:");
-    GrowableArray<const char *>* cmd_list = DCmdFactory::DCmd_list();
+    GrowableArray<const char *>* cmd_list = DCmdFactory::DCmd_list(source);
     for (int i = 0; i < cmd_list->length(); i++) {
-      DCmdFactory* factory = DCmdFactory::factory(cmd_list->at(i),
+      DCmdFactory* factory = DCmdFactory::factory(source, cmd_list->at(i),
                                                   strlen(cmd_list->at(i)));
-      if (!factory->is_hidden()) {
-        output()->print_cr("%s%s", factory->name(),
-                           factory->is_enabled() ? "" : " [disabled]");
-      }
+      output()->print_cr("%s%s", factory->name(),
+                         factory->is_enabled() ? "" : " [disabled]");
       factory = factory->_next;
     }
     output()->print_cr("\nFor more information about a specific command use 'help <command>'.");
@@ -131,7 +144,7 @@
   }
 }
 
-void VersionDCmd::execute(TRAPS) {
+void VersionDCmd::execute(DCmdSource source, TRAPS) {
   output()->print_cr("%s version %s", Abstract_VM_Version::vm_name(),
           Abstract_VM_Version::vm_release());
   JDK_Version jdk_version = JDK_Version::current();
@@ -150,7 +163,7 @@
   _dcmdparser.add_dcmd_option(&_all);
 }
 
-void PrintVMFlagsDCmd::execute(TRAPS) {
+void PrintVMFlagsDCmd::execute(DCmdSource source, TRAPS) {
   if (_all.value()) {
     CommandLineFlags::printFlags(output(), true);
   } else {
@@ -169,7 +182,7 @@
     }
 }
 
-void PrintSystemPropertiesDCmd::execute(TRAPS) {
+void PrintSystemPropertiesDCmd::execute(DCmdSource source, TRAPS) {
   // load sun.misc.VMSupport
   Symbol* klass = vmSymbols::sun_misc_VMSupport();
   Klass* k = SystemDictionary::resolve_or_fail(klass, true, CHECK);
@@ -219,7 +232,7 @@
   _dcmdparser.add_dcmd_option(&_date);
 }
 
-void VMUptimeDCmd::execute(TRAPS) {
+void VMUptimeDCmd::execute(DCmdSource source, TRAPS) {
   if (_date.value()) {
     output()->date_stamp(true, "", ": ");
   }
@@ -239,11 +252,15 @@
   }
 }
 
-void SystemGCDCmd::execute(TRAPS) {
-  Universe::heap()->collect(GCCause::_java_lang_system_gc);
+void SystemGCDCmd::execute(DCmdSource source, TRAPS) {
+  if (!DisableExplicitGC) {
+    Universe::heap()->collect(GCCause::_java_lang_system_gc);
+  } else {
+    output()->print_cr("Explicit GC is disabled, no GC has been performed.");
+  }
 }
 
-void RunFinalizationDCmd::execute(TRAPS) {
+void RunFinalizationDCmd::execute(DCmdSource source, TRAPS) {
   Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_System(),
                                                  true, CHECK);
   instanceKlassHandle klass(THREAD, k);
@@ -263,7 +280,7 @@
   _dcmdparser.add_dcmd_argument(&_filename);
 }
 
-void HeapDumpDCmd::execute(TRAPS) {
+void HeapDumpDCmd::execute(DCmdSource source, TRAPS) {
   // Request a full GC before heap dump if _all is false
   // This helps reduce the number of unreachable objects in the dump
   // and makes it easier to browse.
@@ -301,7 +318,7 @@
   _dcmdparser.add_dcmd_option(&_all);
 }
 
-void ClassHistogramDCmd::execute(TRAPS) {
+void ClassHistogramDCmd::execute(DCmdSource source, TRAPS) {
   VM_GC_HeapInspection heapop(output(),
                               !_all.value() /* request full gc if false */,
                               true /* need_prologue */);
@@ -337,7 +354,7 @@
   _dcmdparser.add_dcmd_argument(&_columns);
 }
 
-void ClassStatsDCmd::execute(TRAPS) {
+void ClassStatsDCmd::execute(DCmdSource source, TRAPS) {
   if (!UnlockDiagnosticVMOptions) {
     output()->print_cr("GC.class_stats command requires -XX:+UnlockDiagnosticVMOptions");
     return;
@@ -384,7 +401,7 @@
   _dcmdparser.add_dcmd_option(&_locks);
 }
 
-void ThreadDumpDCmd::execute(TRAPS) {
+void ThreadDumpDCmd::execute(DCmdSource source, TRAPS) {
   // thread stacks
   VM_PrintThreads op1(output(), _locks.value());
   VMThread::execute(&op1);
@@ -526,7 +543,8 @@
   }
 }
 
-void JMXStartRemoteDCmd::execute(TRAPS) {
+
+void JMXStartRemoteDCmd::execute(DCmdSource source, TRAPS) {
     ResourceMark rm(THREAD);
     HandleMark hm(THREAD);
 
@@ -593,7 +611,7 @@
   // do nothing
 }
 
-void JMXStartLocalDCmd::execute(TRAPS) {
+void JMXStartLocalDCmd::execute(DCmdSource source, TRAPS) {
     ResourceMark rm(THREAD);
     HandleMark hm(THREAD);
 
@@ -611,7 +629,7 @@
 }
 
 
-void JMXStopRemoteDCmd::execute(TRAPS) {
+void JMXStopRemoteDCmd::execute(DCmdSource source, TRAPS) {
     ResourceMark rm(THREAD);
     HandleMark hm(THREAD);
 
--- a/hotspot/src/share/vm/services/diagnosticCommand.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/services/diagnosticCommand.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -51,7 +51,7 @@
   }
   static const char* impact() { return "Low"; }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 class VersionDCmd : public DCmd {
@@ -62,8 +62,13 @@
     return "Print JVM version information.";
   }
   static const char* impact() { return "Low"; }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.util.PropertyPermission",
+                        "java.vm.version", "read"};
+    return p;
+  }
   static int num_arguments() { return 0; }
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 class CommandLineDCmd : public DCmd {
@@ -74,8 +79,13 @@
     return "Print the command line used to start this VM instance.";
   }
   static const char* impact() { return "Low"; }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments() { return 0; }
-  virtual void execute(TRAPS) {
+  virtual void execute(DCmdSource source, TRAPS) {
     Arguments::print_on(_output);
   }
 };
@@ -91,8 +101,13 @@
     static const char* impact() {
       return "Low";
     }
+    static const JavaPermission permission() {
+      JavaPermission p = {"java.util.PropertyPermission",
+                          "*", "read"};
+      return p;
+    }
     static int num_arguments() { return 0; }
-    virtual void execute(TRAPS);
+    virtual void execute(DCmdSource source, TRAPS);
 };
 
 // See also: print_flag in attachListener.cpp
@@ -108,8 +123,13 @@
   static const char* impact() {
     return "Low";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 class VMUptimeDCmd : public DCmdWithParser {
@@ -125,7 +145,7 @@
     return "Low";
   }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 class SystemGCDCmd : public DCmd {
@@ -139,7 +159,7 @@
       return "Medium: Depends on Java heap size and content.";
     }
     static int num_arguments() { return 0; }
-    virtual void execute(TRAPS);
+    virtual void execute(DCmdSource source, TRAPS);
 };
 
 class RunFinalizationDCmd : public DCmd {
@@ -153,7 +173,7 @@
       return "Medium: Depends on Java content.";
     }
     static int num_arguments() { return 0; }
-    virtual void execute(TRAPS);
+    virtual void execute(DCmdSource source, TRAPS);
 };
 
 #if INCLUDE_SERVICES   // Heap dumping supported
@@ -174,8 +194,13 @@
     return "High: Depends on Java heap size and content. "
            "Request a full GC unless the '-all' option is specified.";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 #endif // INCLUDE_SERVICES
 
@@ -194,8 +219,13 @@
   static const char* impact() {
     return "High: Depends on Java heap size and content.";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 class ClassStatsDCmd : public DCmdWithParser {
@@ -216,7 +246,7 @@
     return "High: Depends on Java heap size and content.";
   }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 // See also: thread_dump in attachListener.cpp
@@ -232,8 +262,13 @@
   static const char* impact() {
     return "Medium: Depends on the number of threads.";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 // Enhanced JMX Agent support
@@ -281,7 +316,7 @@
 
   static int num_arguments();
 
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 
 };
 
@@ -302,7 +337,7 @@
     return "Start local management agent.";
   }
 
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 
 };
 
@@ -321,7 +356,7 @@
     return "Stop remote management agent.";
   }
 
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 #endif // SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_HPP
--- a/hotspot/src/share/vm/services/diagnosticFramework.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/services/diagnosticFramework.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -359,7 +359,7 @@
   while (arg != NULL) {
     array->append(new DCmdArgumentInfo(arg->name(), arg->description(),
                   arg->type(), arg->default_string(), arg->is_mandatory(),
-                  false, idx));
+                  false, arg->allow_multiple(), idx));
     idx++;
     arg = arg->next();
   }
@@ -367,32 +367,42 @@
   while (arg != NULL) {
     array->append(new DCmdArgumentInfo(arg->name(), arg->description(),
                   arg->type(), arg->default_string(), arg->is_mandatory(),
-                  true));
+                  true, arg->allow_multiple()));
     arg = arg->next();
   }
   return array;
 }
 
 DCmdFactory* DCmdFactory::_DCmdFactoryList = NULL;
+bool DCmdFactory::_has_pending_jmx_notification = false;
 
-void DCmd::parse_and_execute(outputStream* out, const char* cmdline,
-                             char delim, TRAPS) {
+void DCmd::parse_and_execute(DCmdSource source, outputStream* out,
+                             const char* cmdline, char delim, TRAPS) {
 
   if (cmdline == NULL) return; // Nothing to do!
   DCmdIter iter(cmdline, '\n');
 
+  int count = 0;
   while (iter.has_next()) {
+    if(source == DCmd_Source_MBean && count > 0) {
+      // When diagnostic commands are invoked via JMX, each command line
+      // must contains one and only one command because of the Permission
+      // checks performed by the DiagnosticCommandMBean
+      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Invalid syntax");
+    }
     CmdLine line = iter.next();
     if (line.is_stop()) {
       break;
     }
     if (line.is_executable()) {
-      DCmd* command = DCmdFactory::create_local_DCmd(line, out, CHECK);
+      DCmd* command = DCmdFactory::create_local_DCmd(source, line, out, CHECK);
       assert(command != NULL, "command error must be handled before this line");
       DCmdMark mark(command);
       command->parse(&line, delim, CHECK);
-      command->execute(CHECK);
+      command->execute(source, CHECK);
     }
+    count++;
   }
 }
 
@@ -420,15 +430,78 @@
   return _dcmdparser.argument_info_array();
 }
 
-Mutex* DCmdFactory::_dcmdFactory_lock = new Mutex(Mutex::leaf, "DCmdFactory", true);
+void DCmdFactory::push_jmx_notification_request() {
+  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+  _has_pending_jmx_notification = true;
+  Service_lock->notify_all();
+}
+
+void DCmdFactory::send_notification(TRAPS) {
+  DCmdFactory::send_notification_internal(THREAD);
+  // Clearing pending exception to avoid premature termination of
+  // the service thread
+  if (HAS_PENDING_EXCEPTION) {
+    CLEAR_PENDING_EXCEPTION;
+  }
+}
+void DCmdFactory::send_notification_internal(TRAPS) {
+  ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
+  bool notif = false;
+  {
+    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+    notif = _has_pending_jmx_notification;
+    _has_pending_jmx_notification = false;
+  }
+  if (notif) {
+
+    Klass* k = Management::sun_management_ManagementFactoryHelper_klass(CHECK);
+    instanceKlassHandle mgmt_factory_helper_klass(THREAD, k);
 
-DCmdFactory* DCmdFactory::factory(const char* name, size_t len) {
+    JavaValue result(T_OBJECT);
+    JavaCalls::call_static(&result,
+            mgmt_factory_helper_klass,
+            vmSymbols::getDiagnosticCommandMBean_name(),
+            vmSymbols::getDiagnosticCommandMBean_signature(),
+            CHECK);
+
+    instanceOop m = (instanceOop) result.get_jobject();
+    instanceHandle dcmd_mbean_h(THREAD, m);
+
+    Klass* k2 = Management::sun_management_DiagnosticCommandImpl_klass(CHECK);
+    instanceKlassHandle dcmd_mbean_klass(THREAD, k2);
+
+    if (!dcmd_mbean_h->is_a(k2)) {
+      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "ManagementFactory.getDiagnosticCommandMBean didn't return a DiagnosticCommandMBean instance");
+    }
+
+    JavaValue result2(T_VOID);
+    JavaCallArguments args2(dcmd_mbean_h);
+
+    JavaCalls::call_virtual(&result2,
+            dcmd_mbean_klass,
+            vmSymbols::createDiagnosticFrameworkNotification_name(),
+            vmSymbols::void_method_signature(),
+            &args2,
+            CHECK);
+  }
+}
+
+Mutex* DCmdFactory::_dcmdFactory_lock = new Mutex(Mutex::leaf, "DCmdFactory", true);
+bool DCmdFactory::_send_jmx_notification = false;
+
+DCmdFactory* DCmdFactory::factory(DCmdSource source, const char* name, size_t len) {
   MutexLockerEx ml(_dcmdFactory_lock, Mutex::_no_safepoint_check_flag);
   DCmdFactory* factory = _DCmdFactoryList;
   while (factory != NULL) {
     if (strlen(factory->name()) == len &&
         strncmp(name, factory->name(), len) == 0) {
-      return factory;
+      if(factory->export_flags() & source) {
+        return factory;
+      } else {
+        return NULL;
+      }
     }
     factory = factory->_next;
   }
@@ -439,11 +512,16 @@
   MutexLockerEx ml(_dcmdFactory_lock, Mutex::_no_safepoint_check_flag);
   factory->_next = _DCmdFactoryList;
   _DCmdFactoryList = factory;
+  if (_send_jmx_notification && !factory->_hidden
+      && (factory->_export_flags & DCmd_Source_MBean)) {
+    DCmdFactory::push_jmx_notification_request();
+  }
   return 0; // Actually, there's no checks for duplicates
 }
 
-DCmd* DCmdFactory::create_global_DCmd(CmdLine &line, outputStream* out, TRAPS) {
-  DCmdFactory* f = factory(line.cmd_addr(), line.cmd_len());
+DCmd* DCmdFactory::create_global_DCmd(DCmdSource source, CmdLine &line,
+                                      outputStream* out, TRAPS) {
+  DCmdFactory* f = factory(source, line.cmd_addr(), line.cmd_len());
   if (f != NULL) {
     if (f->is_enabled()) {
       THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(),
@@ -455,8 +533,9 @@
              "Unknown diagnostic command");
 }
 
-DCmd* DCmdFactory::create_local_DCmd(CmdLine &line, outputStream* out, TRAPS) {
-  DCmdFactory* f = factory(line.cmd_addr(), line.cmd_len());
+DCmd* DCmdFactory::create_local_DCmd(DCmdSource source, CmdLine &line,
+                                     outputStream* out, TRAPS) {
+  DCmdFactory* f = factory(source, line.cmd_addr(), line.cmd_len());
   if (f != NULL) {
     if (!f->is_enabled()) {
       THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(),
@@ -468,12 +547,12 @@
              "Unknown diagnostic command");
 }
 
-GrowableArray<const char*>* DCmdFactory::DCmd_list() {
+GrowableArray<const char*>* DCmdFactory::DCmd_list(DCmdSource source) {
   MutexLockerEx ml(_dcmdFactory_lock, Mutex::_no_safepoint_check_flag);
   GrowableArray<const char*>* array = new GrowableArray<const char*>();
   DCmdFactory* factory = _DCmdFactoryList;
   while (factory != NULL) {
-    if (!factory->is_hidden()) {
+    if (!factory->is_hidden() && (factory->export_flags() & source)) {
       array->append(factory->name());
     }
     factory = factory->next();
@@ -481,15 +560,16 @@
   return array;
 }
 
-GrowableArray<DCmdInfo*>* DCmdFactory::DCmdInfo_list() {
+GrowableArray<DCmdInfo*>* DCmdFactory::DCmdInfo_list(DCmdSource source) {
   MutexLockerEx ml(_dcmdFactory_lock, Mutex::_no_safepoint_check_flag);
   GrowableArray<DCmdInfo*>* array = new GrowableArray<DCmdInfo*>();
   DCmdFactory* factory = _DCmdFactoryList;
   while (factory != NULL) {
-    if (!factory->is_hidden()) {
+    if (!factory->is_hidden() && (factory->export_flags() & source)) {
       array->append(new DCmdInfo(factory->name(),
                     factory->description(), factory->impact(),
-                    factory->num_arguments(), factory->is_enabled()));
+                    factory->permission(), factory->num_arguments(),
+                    factory->is_enabled()));
     }
     factory = factory->next();
   }
--- a/hotspot/src/share/vm/services/diagnosticFramework.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/services/diagnosticFramework.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,22 @@
 #include "utilities/ostream.hpp"
 
 
+enum DCmdSource {
+  DCmd_Source_Internal  = 0x01U,  // invocation from the JVM
+  DCmd_Source_AttachAPI = 0x02U,  // invocation via the attachAPI
+  DCmd_Source_MBean     = 0x04U   // invocation via an MBean
+};
+
+// Warning: strings referenced by the JavaPermission struct are passed to
+// the native part of the JDK. Avoid use of dynamically allocated strings
+// that could be de-allocated before the JDK native code had time to
+// convert them into Java Strings.
+struct JavaPermission {
+  const char* _class;
+  const char* _name;
+  const char* _action;
+};
+
 // CmdLine is the class used to handle a command line containing a single
 // diagnostic command and its arguments. It provides methods to access the
 // command name and the beginning of the arguments. The class is also
@@ -113,26 +129,30 @@
 // used to export the description to the JMX interface of the framework.
 class DCmdInfo : public ResourceObj {
 protected:
-  const char* _name;
-  const char* _description;
-  const char* _impact;
-  int         _num_arguments;
-  bool        _is_enabled;
+  const char* _name;           /* Name of the diagnostic command */
+  const char* _description;    /* Short description */
+  const char* _impact;         /* Impact on the JVM */
+  JavaPermission _permission;  /* Java Permission required to execute this command if any */
+  int         _num_arguments;  /* Number of supported options or arguments */
+  bool        _is_enabled;     /* True if the diagnostic command can be invoked, false otherwise */
 public:
   DCmdInfo(const char* name,
           const char* description,
           const char* impact,
+          JavaPermission permission,
           int num_arguments,
           bool enabled) {
     this->_name = name;
     this->_description = description;
     this->_impact = impact;
+    this->_permission = permission;
     this->_num_arguments = num_arguments;
     this->_is_enabled = enabled;
   }
   const char* name() const { return _name; }
   const char* description() const { return _description; }
   const char* impact() const { return _impact; }
+  JavaPermission permission() const { return _permission; }
   int num_arguments() const { return _num_arguments; }
   bool is_enabled() const { return _is_enabled; }
 
@@ -144,16 +164,20 @@
 // framework.
 class DCmdArgumentInfo : public ResourceObj {
 protected:
-  const char* _name;
-  const char* _description;
-  const char* _type;
-  const char* _default_string;
-  bool        _mandatory;
-  bool        _option;
-  int         _position;
+  const char* _name;            /* Option/Argument name */
+  const char* _description;     /* Short description */
+  const char* _type;            /* Type: STRING, BOOLEAN, etc. */
+  const char* _default_string;  /* Default value in a parsable string */
+  bool        _mandatory;       /* True if the option/argument is mandatory */
+  bool        _option;          /* True if it is an option, false if it is an argument */
+                                /* (see diagnosticFramework.hpp for option/argument definitions) */
+  bool        _multiple;        /* True if the option can be specified several times */
+  int         _position;        /* Expected position for this argument (this field is */
+                                /* meaningless for options) */
 public:
   DCmdArgumentInfo(const char* name, const char* description, const char* type,
-                   const char* default_string, bool mandatory, bool option) {
+                   const char* default_string, bool mandatory, bool option,
+                   bool multiple) {
     this->_name = name;
     this->_description = description;
     this->_type = type;
@@ -161,11 +185,12 @@
     this->_option = option;
     this->_mandatory = mandatory;
     this->_option = option;
+    this->_multiple = multiple;
     this->_position = -1;
   }
   DCmdArgumentInfo(const char* name, const char* description, const char* type,
                    const char* default_string, bool mandatory, bool option,
-                   int position) {
+                   bool multiple, int position) {
     this->_name = name;
     this->_description = description;
     this->_type = type;
@@ -173,6 +198,7 @@
     this->_option = option;
     this->_mandatory = mandatory;
     this->_option = option;
+    this->_multiple = multiple;
     this->_position = position;
   }
   const char* name() const { return _name; }
@@ -181,11 +207,29 @@
   const char* default_string() const { return _default_string; }
   bool is_mandatory() const { return _mandatory; }
   bool is_option() const { return _option; }
+  bool is_multiple() const { return _multiple; }
   int position() const { return _position; }
 };
 
 // The DCmdParser class can be used to create an argument parser for a
 // diagnostic command. It is not mandatory to use it to parse arguments.
+// The DCmdParser parses a CmdLine instance according to the parameters that
+// have been declared by its associated diagnostic command. A parameter can
+// either be an option or an argument. Options are identified by the option name
+// while arguments are identified by their position in the command line. The
+// position of an argument is defined relative to all arguments passed on the
+// command line; options are not considered when defining an argument position.
+// The generic syntax of a diagnostic command is:
+//
+//    <command name> [<option>=<value>] [<argument_value>]
+//
+// Example:
+//
+//    command_name option1=value1 option2=value argumentA argumentB argumentC
+//
+// In this command line, the diagnostic command receives five parameters: two
+// options named option1 and option2, and three arguments. argumentA's position
+// is 0, argumentB's position is 1 and argumentC's position is 2.
 class DCmdParser {
 private:
   GenDCmdArgument* _options;
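A minimal standalone sketch of the position rule described in the comment above (ordinary C++, not the HotSpot parser; as a simplification it assumes an option is any token of the name=value form, whereas the real parser matches the option names declared by the command):

    #include <iostream>
    #include <sstream>
    #include <string>

    int main() {
      // Same shape as the example above: two options and three arguments.
      std::string cmdline =
          "command_name option1=value1 option2=value argumentA argumentB argumentC";
      std::istringstream in(cmdline);
      std::string token;
      in >> token;                          // first token is the command name
      int position = 0;                     // options do not consume positions
      while (in >> token) {
        if (token.find('=') != std::string::npos) {
          std::cout << "option   " << token << "\n";
        } else {
          std::cout << "argument " << token << " at position " << position++ << "\n";
        }
      }
      return 0;                             // argumentA..C get positions 0, 1, 2
    }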
@@ -249,6 +293,19 @@
   // longer description can provide more specific details like the fact that Thread Dump
   // impact depends on the heap size.
   static const char* impact() { return "Low: No impact"; }
+  // The permission() method returns the description of the Java Permission
+  // required when the diagnostic command is invoked via the
+  // DiagnosticCommandMBean. The rationale for this permission check is that
+  // the DiagnosticCommandMBean can be used to perform remote invocations of
+  // diagnostic commands through the PlatformMBeanServer. The (optional) Java
+  // Permission associated with each diagnostic command should make it easier
+  // for system administrators to write policy files granting remote users
+  // permission to execute diagnostic commands. Any diagnostic command with
+  // a potential impact on security should override this method.
+  static const JavaPermission permission() {
+    JavaPermission p = {NULL, NULL, NULL};
+    return p;
+  }
   static int num_arguments() { return 0; }
   outputStream* output() { return _output; }
   bool is_heap_allocated()  { return _is_heap_allocated; }
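For a concrete picture of what a security-sensitive command declares, the sketch below reproduces the permission used by the NMT command later in this change, wrapped in a standalone program; the JavaPermission layout is the struct defined above, and the main() wrapper is purely illustrative. Because the strings are literals, they also satisfy the lifetime warning attached to JavaPermission:

    #include <cstdio>

    struct JavaPermission {     // same layout as in diagnosticFramework.hpp
      const char* _class;
      const char* _name;
      const char* _action;
    };

    static const JavaPermission permission() {
      JavaPermission p = {"java.lang.management.ManagementPermission",
                          "monitor", NULL};
      return p;
    }

    int main() {
      JavaPermission p = permission();
      std::printf("%s \"%s\"\n", p._class, p._name);
      return 0;
    }

A policy file that lets a remote user invoke such a command through the DiagnosticCommandMBean would then grant: permission java.lang.management.ManagementPermission "monitor";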
@@ -263,7 +320,7 @@
                 "The argument list of this diagnostic command should be empty.");
     }
   }
-  virtual void execute(TRAPS) { }
+  virtual void execute(DCmdSource source, TRAPS) { }
   virtual void reset(TRAPS) { }
   virtual void cleanup() { }
 
@@ -278,7 +335,7 @@
   }
 
   // main method to invoke the framework
-  static void parse_and_execute(outputStream* out, const char* cmdline,
+  static void parse_and_execute(DCmdSource source, outputStream* out, const char* cmdline,
                                 char delim, TRAPS);
 };
 
@@ -291,9 +348,10 @@
   static const char* description() { return "No Help";}
   static const char* disabled_message() { return "Diagnostic command currently disabled"; }
   static const char* impact() { return "Low: No impact"; }
+  static const JavaPermission permission() { JavaPermission p = {NULL, NULL, NULL}; return p; }
   static int num_arguments() { return 0; }
   virtual void parse(CmdLine *line, char delim, TRAPS);
-  virtual void execute(TRAPS) { }
+  virtual void execute(DCmdSource source, TRAPS) { }
   virtual void reset(TRAPS);
   virtual void cleanup();
   virtual void print_help(const char* name);
@@ -323,6 +381,8 @@
 class DCmdFactory: public CHeapObj<mtInternal> {
 private:
   static Mutex*       _dcmdFactory_lock;
+  static bool         _send_jmx_notification;
+  static bool         _has_pending_jmx_notification;
   // Pointer to the next factory in the singly-linked list of registered
   // diagnostic commands
   DCmdFactory*        _next;
@@ -333,19 +393,23 @@
   // When hidden, a diagnostic command doesn't appear in the list of commands
   // provided by the 'help' command.
   bool                _hidden;
+  uint32_t            _export_flags;
   int                 _num_arguments;
   static DCmdFactory* _DCmdFactoryList;
 public:
-  DCmdFactory(int num_arguments, bool enabled, bool hidden) {
+  DCmdFactory(int num_arguments, uint32_t flags, bool enabled, bool hidden) {
     _next = NULL;
     _enabled = enabled;
     _hidden = hidden;
+    _export_flags = flags;
     _num_arguments = num_arguments;
   }
   bool is_enabled() const { return _enabled; }
   void set_enabled(bool b) { _enabled = b; }
   bool is_hidden() const { return _hidden; }
   void set_hidden(bool b) { _hidden = b; }
+  uint32_t export_flags() { return _export_flags; }
+  void set_export_flags(uint32_t f) { _export_flags = f; }
   int num_arguments() { return _num_arguments; }
   DCmdFactory* next() { return _next; }
   virtual DCmd* create_Cheap_instance(outputStream* output) = 0;
@@ -353,19 +417,29 @@
   virtual const char* name() const = 0;
   virtual const char* description() const = 0;
   virtual const char* impact() const = 0;
+  virtual const JavaPermission permission() const = 0;
   virtual const char* disabled_message() const = 0;
   // Register a DCmdFactory to make a diagnostic command available.
   // Once registered, a diagnostic command must not be unregistered.
   // To prevent a diagnostic command from being executed, just set the
   // enabled flag to false.
   static int register_DCmdFactory(DCmdFactory* factory);
-  static DCmdFactory* factory(const char* cmd, size_t len);
+  static DCmdFactory* factory(DCmdSource source, const char* cmd, size_t len);
   // Returns a C-heap allocated diagnostic command for the given command line
-  static DCmd* create_global_DCmd(CmdLine &line, outputStream* out, TRAPS);
+  static DCmd* create_global_DCmd(DCmdSource source, CmdLine &line, outputStream* out, TRAPS);
   // Returns a resourceArea allocated diagnostic command for the given command line
-  static DCmd* create_local_DCmd(CmdLine &line, outputStream* out, TRAPS);
-  static GrowableArray<const char*>* DCmd_list();
-  static GrowableArray<DCmdInfo*>* DCmdInfo_list();
+  static DCmd* create_local_DCmd(DCmdSource source, CmdLine &line, outputStream* out, TRAPS);
+  static GrowableArray<const char*>* DCmd_list(DCmdSource source);
+  static GrowableArray<DCmdInfo*>* DCmdInfo_list(DCmdSource source);
+
+  static void set_jmx_notification_enabled(bool enabled) {
+    _send_jmx_notification = enabled;
+  }
+  static void push_jmx_notification_request();
+  static bool has_pending_jmx_notification() { return _has_pending_jmx_notification; }
+  static void send_notification(TRAPS);
+private:
+  static void send_notification_internal(TRAPS);
 
   friend class HelpDCmd;
 };
@@ -374,8 +448,8 @@
 // where this template is used to create and register factories.
 template <class DCmdClass> class DCmdFactoryImpl : public DCmdFactory {
 public:
-  DCmdFactoryImpl(bool enabled, bool hidden) :
-    DCmdFactory(DCmdClass::num_arguments(), enabled, hidden) { }
+  DCmdFactoryImpl(uint32_t flags, bool enabled, bool hidden) :
+    DCmdFactory(DCmdClass::num_arguments(), flags, enabled, hidden) { }
   // Returns a C-heap allocated instance
   virtual DCmd* create_Cheap_instance(outputStream* output) {
     return new (ResourceObj::C_HEAP, mtInternal) DCmdClass(output, true);
@@ -393,6 +467,9 @@
   virtual const char* impact() const {
     return DCmdClass::impact();
   }
+  virtual const JavaPermission permission() const {
+    return DCmdClass::permission();
+  }
   virtual const char* disabled_message() const {
      return DCmdClass::disabled_message();
   }
--- a/hotspot/src/share/vm/services/jmm.h	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/services/jmm.h	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,8 @@
   JMM_VERSION_1_1 = 0x20010100, // JDK 6
   JMM_VERSION_1_2 = 0x20010200, // JDK 7
   JMM_VERSION_1_2_1 = 0x20010201, // JDK 7 GA
-  JMM_VERSION     = 0x20010202
+  JMM_VERSION_1_2_2 = 0x20010202,
+  JMM_VERSION     = 0x20010203
 };
 
 typedef struct {
@@ -62,7 +63,8 @@
   unsigned int isObjectMonitorUsageSupported : 1;
   unsigned int isSynchronizerUsageSupported : 1;
   unsigned int isThreadAllocatedMemorySupported : 1;
-  unsigned int : 23;
+  unsigned int isRemoteDiagnosticCommandsSupported : 1;
+  unsigned int : 22;
 } jmmOptionalSupport;
 
 typedef enum {
@@ -190,21 +192,27 @@
 } jmmGCStat;
 
 typedef struct {
-  const char* name;
-  const char* description;
-  const char* impact;
-  int         num_arguments;
-  jboolean    enabled;
+  const char* name;                /* Name of the diagnostic command */
+  const char* description;         /* Short description */
+  const char* impact;              /* Impact on the JVM */
+  const char* permission_class;    /* Class name of the required permission if any */
+  const char* permission_name;     /* Permission name of the required permission if any */
+  const char* permission_action;   /* Action name of the required permission if any */
+  int         num_arguments;       /* Number of supported options or arguments */
+  jboolean    enabled;             /* True if the diagnostic command can be invoked, false otherwise */
 } dcmdInfo;
 
 typedef struct {
-  const char* name;
-  const char* description;
-  const char* type;
-  const char* default_string;
-  jboolean    mandatory;
-  jboolean    option;
-  int         position;
+  const char* name;                /* Option/Argument name */
+  const char* description;         /* Short description */
+  const char* type;                /* Type: STRING, BOOLEAN, etc. */
+  const char* default_string;      /* Default value in a parsable string */
+  jboolean    mandatory;           /* True if the option/argument is mandatory */
+  jboolean    option;              /* True if it is an option, false if it is an argument */
+                                   /* (see diagnosticFramework.hpp for option/argument definitions) */
+  jboolean    multiple;            /* True if the option can be specified several times */
+  int         position;            /* Expected position for this argument (this field is */
+                                   /* meaningless for options) */
 } dcmdArgInfo;
 
 typedef struct jmmInterface_1_ {
@@ -327,6 +335,9 @@
   jstring      (JNICALL *ExecuteDiagnosticCommand)
                                                  (JNIEnv *env,
                                                   jstring command);
+  void         (JNICALL *SetDiagnosticFrameworkNotificationEnabled)
+                                                 (JNIEnv *env,
+                                                  jboolean enabled);
 } JmmInterface;
 
 #ifdef __cplusplus
--- a/hotspot/src/share/vm/services/management.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/services/management.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,6 +68,9 @@
 Klass* Management::_managementFactory_klass = NULL;
 Klass* Management::_garbageCollectorImpl_klass = NULL;
 Klass* Management::_gcInfo_klass = NULL;
+Klass* Management::_diagnosticCommandImpl_klass = NULL;
+Klass* Management::_managementFactoryHelper_klass = NULL;
+
 
 jmmOptionalSupport Management::_optional_support = {0};
 TimeStamp Management::_stamp;
@@ -128,11 +131,14 @@
   _optional_support.isSynchronizerUsageSupported = 1;
 #endif // INCLUDE_SERVICES
   _optional_support.isThreadAllocatedMemorySupported = 1;
+  _optional_support.isRemoteDiagnosticCommandsSupported = 1;
 
   // Registration of the diagnostic commands
   DCmdRegistrant::register_dcmds();
   DCmdRegistrant::register_dcmds_ext();
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<NMTDCmd>(true, false));
+  uint32_t full_export = DCmd_Source_Internal | DCmd_Source_AttachAPI
+                         | DCmd_Source_MBean;
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<NMTDCmd>(full_export, true, false));
 }
 
 void Management::initialize(TRAPS) {
@@ -262,6 +268,20 @@
   return _gcInfo_klass;
 }
 
+Klass* Management::sun_management_DiagnosticCommandImpl_klass(TRAPS) {
+  if (_diagnosticCommandImpl_klass == NULL) {
+    _diagnosticCommandImpl_klass = load_and_initialize_klass(vmSymbols::sun_management_DiagnosticCommandImpl(), CHECK_NULL);
+  }
+  return _diagnosticCommandImpl_klass;
+}
+
+Klass* Management::sun_management_ManagementFactoryHelper_klass(TRAPS) {
+  if (_managementFactoryHelper_klass == NULL) {
+    _managementFactoryHelper_klass = load_and_initialize_klass(vmSymbols::sun_management_ManagementFactoryHelper(), CHECK_NULL);
+  }
+  return _managementFactoryHelper_klass;
+}
+
 static void initialize_ThreadInfo_constructor_arguments(JavaCallArguments* args, ThreadSnapshot* snapshot, TRAPS) {
   Handle snapshot_thread(THREAD, snapshot->threadObj());
 
@@ -2144,7 +2164,7 @@
 
 JVM_ENTRY(jobjectArray, jmm_GetDiagnosticCommands(JNIEnv *env))
   ResourceMark rm(THREAD);
-  GrowableArray<const char *>* dcmd_list = DCmdFactory::DCmd_list();
+  GrowableArray<const char *>* dcmd_list = DCmdFactory::DCmd_list(DCmd_Source_MBean);
   objArrayOop cmd_array_oop = oopFactory::new_objArray(SystemDictionary::String_klass(),
           dcmd_list->length(), CHECK_NULL);
   objArrayHandle cmd_array(THREAD, cmd_array_oop);
@@ -2173,7 +2193,7 @@
                "Array element type is not String class");
   }
 
-  GrowableArray<DCmdInfo *>* info_list = DCmdFactory::DCmdInfo_list();
+  GrowableArray<DCmdInfo *>* info_list = DCmdFactory::DCmdInfo_list(DCmd_Source_MBean);
 
   int num_cmds = cmds_ah->length();
   for (int i = 0; i < num_cmds; i++) {
@@ -2196,6 +2216,10 @@
     infoArray[i].name = info->name();
     infoArray[i].description = info->description();
     infoArray[i].impact = info->impact();
+    JavaPermission p = info->permission();
+    infoArray[i].permission_class = p._class;
+    infoArray[i].permission_name = p._name;
+    infoArray[i].permission_action = p._action;
     infoArray[i].num_arguments = info->num_arguments();
     infoArray[i].enabled = info->is_enabled();
   }
@@ -2215,7 +2239,8 @@
               "Command line content cannot be null.");
   }
   DCmd* dcmd = NULL;
-  DCmdFactory*factory = DCmdFactory::factory(cmd_name, strlen(cmd_name));
+  DCmdFactory* factory = DCmdFactory::factory(DCmd_Source_MBean, cmd_name,
+                                              strlen(cmd_name));
   if (factory != NULL) {
     dcmd = factory->create_resource_instance(NULL);
   }
@@ -2235,6 +2260,7 @@
     infoArray[i].default_string = array->at(i)->default_string();
     infoArray[i].mandatory = array->at(i)->is_mandatory();
     infoArray[i].option = array->at(i)->is_option();
+    infoArray[i].multiple = array->at(i)->is_multiple();
     infoArray[i].position = array->at(i)->position();
   }
   return;
@@ -2253,11 +2279,15 @@
                    "Command line content cannot be null.");
   }
   bufferedStream output;
-  DCmd::parse_and_execute(&output, cmdline, ' ', CHECK_NULL);
+  DCmd::parse_and_execute(DCmd_Source_MBean, &output, cmdline, ' ', CHECK_NULL);
   oop result = java_lang_String::create_oop_from_str(output.as_string(), CHECK_NULL);
   return (jstring) JNIHandles::make_local(env, result);
 JVM_END
 
+JVM_ENTRY(void, jmm_SetDiagnosticFrameworkNotificationEnabled(JNIEnv *env, jboolean enabled))
+  DCmdFactory::set_jmx_notification_enabled(enabled ? true : false);
+JVM_END
+
 jlong Management::ticks_to_ms(jlong ticks) {
   assert(os::elapsed_frequency() > 0, "Must be non-zero");
   return (jlong)(((double)ticks / (double)os::elapsed_frequency())
@@ -2304,7 +2334,8 @@
   jmm_GetDiagnosticCommands,
   jmm_GetDiagnosticCommandInfo,
   jmm_GetDiagnosticCommandArgumentsInfo,
-  jmm_ExecuteDiagnosticCommand
+  jmm_ExecuteDiagnosticCommand,
+  jmm_SetDiagnosticFrameworkNotificationEnabled
 };
 #endif // INCLUDE_MANAGEMENT
 
--- a/hotspot/src/share/vm/services/management.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/services/management.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,8 @@
   static Klass*             _garbageCollectorMXBean_klass;
   static Klass*             _managementFactory_klass;
   static Klass*             _garbageCollectorImpl_klass;
+  static Klass*             _diagnosticCommandImpl_klass;
+  static Klass*             _managementFactoryHelper_klass;
   static Klass*             _gcInfo_klass;
 
   static Klass* load_and_initialize_klass(Symbol* sh, TRAPS);
@@ -99,6 +101,10 @@
       NOT_MANAGEMENT_RETURN_(NULL);
   static Klass* com_sun_management_GcInfo_klass(TRAPS)
       NOT_MANAGEMENT_RETURN_(NULL);
+  static Klass* sun_management_DiagnosticCommandImpl_klass(TRAPS)
+      NOT_MANAGEMENT_RETURN_(NULL);
+  static Klass* sun_management_ManagementFactoryHelper_klass(TRAPS)
+      NOT_MANAGEMENT_RETURN_(NULL);
 
   static instanceOop create_thread_info_instance(ThreadSnapshot* snapshot, TRAPS);
   static instanceOop create_thread_info_instance(ThreadSnapshot* snapshot, objArrayHandle monitors_array, typeArrayHandle depths_array, objArrayHandle synchronizers_array, TRAPS);
--- a/hotspot/src/share/vm/services/memSnapshot.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/services/memSnapshot.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -262,13 +262,28 @@
   assert(cur->is_reserved_region() && cur->contains_region(rec),
     "Sanity check");
   if (rec->is_same_region(cur)) {
-    // release whole reserved region
+
+    // In the snapshot, the virtual memory records are sorted as follows:
+    // 1. by the virtual memory region's base address
+    // 2. each reservation record is followed by the commit records within that reservation;
+    //    the commit records are also in base address order.
+    // When a reserved region is released, we want to remove the reservation record and all
+    // of the commit records that follow it.
 #ifdef ASSERT
-    VMMemRegion* next_region = (VMMemRegion*)peek_next();
-    // should not have any committed memory in this reserved region
-    assert(next_region == NULL || !next_region->is_committed_region(), "Sanity check");
+    address low_addr = cur->addr();
+    address high_addr = low_addr + cur->size();
 #endif
+    // remove virtual memory reservation record
     remove();
+    // remove committed regions within above reservation
+    VMMemRegion* next_region = (VMMemRegion*)current();
+    while (next_region != NULL && next_region->is_committed_region()) {
+      assert(next_region->addr() >= low_addr &&
+             next_region->addr() + next_region->size() <= high_addr,
+            "Range check");
+      remove();
+      next_region = (VMMemRegion*)current();
+    }
   } else if (rec->addr() == cur->addr() ||
     rec->addr() + rec->size() == cur->addr() + cur->size()) {
     // released region is at either end of this region
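A minimal standalone model of the removal logic above (ordinary C++; a std::vector stands in for the snapshot's record iterator, which is a simplification): the reservation record is erased together with every commit record that follows it and falls inside the reserved range:

    #include <iostream>
    #include <vector>

    struct Rec { bool reserved; unsigned addr; unsigned size; };

    int main() {
      std::vector<Rec> snapshot = {
        {true,  0x1000, 0x400},   // reservation [0x1000, 0x1400)
        {false, 0x1000, 0x100},   // commit record inside it
        {false, 0x1200, 0x100},   // commit record inside it
        {true,  0x2000, 0x400}    // next reservation, must survive
      };
      unsigned low  = snapshot[0].addr;
      unsigned high = low + snapshot[0].size;
      std::vector<Rec>::iterator it = snapshot.erase(snapshot.begin());  // drop the reservation
      while (it != snapshot.end() && !it->reserved &&
             it->addr >= low && it->addr + it->size <= high) {
        it = snapshot.erase(it);                                         // drop its commit records
      }
      std::cout << "records left: " << snapshot.size() << "\n";          // 1
      return 0;
    }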
--- a/hotspot/src/share/vm/services/nmtDCmd.cpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/services/nmtDCmd.cpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,7 +71,7 @@
   _dcmdparser.add_dcmd_option(&_scale);
 }
 
-void NMTDCmd::execute(TRAPS) {
+void NMTDCmd::execute(DCmdSource source, TRAPS) {
   const char* scale_value = _scale.value();
   size_t scale_unit;
   if (strcmp(scale_value, "KB") == 0 || strcmp(scale_value, "kb") == 0) {
--- a/hotspot/src/share/vm/services/nmtDCmd.hpp	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/src/share/vm/services/nmtDCmd.hpp	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,10 +52,15 @@
     return "Print native memory usage";
   }
   static const char* impact() {
-    return "Medium:";
+    return "Medium";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
   }
   static int num_arguments();
-  virtual void execute(TRAPS);
+  virtual void execute(DCmdSource source, TRAPS);
 };
 
 #endif // SHARE_VM_SERVICES_NMT_DCMD_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/concurrentMarkSweep/CheckAllocateAndSystemGC.java	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test CheckAllocateAndSystemGC
+ * @summary CMS: assert(used() == used_after_gc && used_after_gc <= capacity()) failed: used: 0 used_after_gc: 292080 capacity: 1431699456
+ * @bug 8013032
+ * @key gc
+ * @key regression
+ * @library /testlibrary
+ * @run main/othervm CheckAllocateAndSystemGC
+ * @author jon.masamitsu@oracle.com
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class CheckAllocateAndSystemGC {
+  public static void main(String args[]) throws Exception {
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+      "-showversion",
+      "-XX:+UseConcMarkSweepGC",
+      "-Xmn4m",
+      "-XX:MaxTenuringThreshold=1",
+      "-XX:-UseCMSCompactAtFullCollection",
+      "CheckAllocateAndSystemGC$AllocateAndSystemGC"
+      );
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    output.shouldNotContain("error");
+
+    output.shouldHaveExitValue(0);
+  }
+  static class AllocateAndSystemGC {
+    public static void main(String [] args) {
+      Integer x[] = new Integer[1000];
+      // Allocate enough objects to cause a minor collection.
+      // These allocations suffice for a 4m young generation.
+      for (int i = 0; i < 100; i++) {
+        Integer y[] = new Integer[10000];
+      }
+      System.gc();
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/concurrentMarkSweep/SystemGCOnForegroundCollector.java	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test SystemGCOnForegroundCollector
+ * @summary CMS: Call reset_after_compaction() only if a compaction has been done
+ * @bug 8013184
+ * @key gc
+ * @key regression
+ * @library /testlibrary
+ * @run main/othervm SystemGCOnForegroundCollector
+ * @author jon.masamitsu@oracle.com
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class SystemGCOnForegroundCollector {
+  public static void main(String args[]) throws Exception {
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+      "-showversion",
+      "-XX:+UseConcMarkSweepGC",
+      "-XX:MaxTenuringThreshold=1",
+      "-XX:-UseCMSCompactAtFullCollection",
+      ThreePlusMSSystemGC.class.getName()
+      );
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    output.shouldNotContain("error");
+
+    output.shouldHaveExitValue(0);
+  }
+
+  static class ThreePlusMSSystemGC {
+    public static void main(String [] args) {
+      // When running this test, 3 System.gc() calls were always
+      // enough to reproduce the failure, but the cause of the failure
+      // depends on how objects are allocated in the CMS generation,
+      // which is non-deterministic.  Use 30 iterations for a more
+      // reliable test.
+      for (int i = 0; i < 30; i++) {
+        System.gc();
+      }
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestRegionAlignment.java	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestRegionAlignment.java
+ * @bug 8013791
+ * @summary Make sure that G1 ergonomics pick a heap size that is aligned with the region size
+ * @run main/othervm -XX:+UseG1GC -XX:G1HeapRegionSize=32m -XX:MaxRAM=555m TestRegionAlignment
+ *
+ * When G1 ergonomically picks a maximum heap size it must be aligned to the region size.
+ * This test tries to get the VM to pick a small and unaligned heap size (by using MaxRAM=555) and a
+ * large region size (by using -XX:G1HeapRegionSize=32m). This will fail without the fix for 8013791.
+ */
+public class TestRegionAlignment {
+    public static void main(String[] args) { }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestShrinkToOneRegion.java	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestShrinkToOneRegion.java
+ * @bug 8013872
+ * @summary Shrinking the heap down to one region used to hit an assert
+ * @run main/othervm -XX:+UseG1GC -XX:G1HeapRegionSize=32m -Xmx256m TestShrinkToOneRegion
+ *
+ * Doing a System.gc() without having allocated many objects will shrink the heap.
+ * With a large region size we will shrink the heap to one region.
+ */
+public class TestShrinkToOneRegion {
+    public static void main(String[] args) {
+        System.gc();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/ClassUnload/KeepAliveClass.java	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test KeepAliveClass
+ * @summary This test case uses a java.lang.Class instance to keep a class alive.
+ * @library /testlibrary /testlibrary/whitebox /runtime/testlibrary
+ * @library classes
+ * @build KeepAliveClass test.Empty
+ * @build ClassUnloadCommon
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -Xmn8m -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI KeepAliveClass
+ */
+
+import java.lang.ref.SoftReference;
+import sun.hotspot.WhiteBox;
+
+/**
+ * Test that verifies that classes are not unloaded when specific types of references are kept to them.
+ */
+public class KeepAliveClass {
+  private static final String className = "test.Empty";
+  private static final WhiteBox wb = WhiteBox.getWhiteBox();
+  public static Object escape = null;
+
+  public static void main(String... args) throws Exception {
+    ClassLoader cl = ClassUnloadCommon.newClassLoader();
+    Class<?> c = cl.loadClass(className);
+    Object o = c.newInstance();
+    o = null; cl = null;
+    escape = c;
+
+    {
+        boolean isAlive = wb.isClassAlive(className);
+        System.out.println("testClass (1) alive: " + isAlive);
+
+        ClassUnloadCommon.failIf(!isAlive, "should be alive");
+    }
+
+    ClassUnloadCommon.triggerUnloading();
+
+    {
+        boolean isAlive = wb.isClassAlive(className);
+        System.out.println("testClass (2) alive: " + isAlive);
+
+        ClassUnloadCommon.failIf(!isAlive, "should be alive");
+    }
+    c = null;
+    escape = null;
+    ClassUnloadCommon.triggerUnloading();
+
+    {
+        boolean isAlive = wb.isClassAlive(className);
+        System.out.println("testClass (3) alive: " + isAlive);
+        ClassUnloadCommon.failIf(isAlive, "should be unloaded");
+    }
+
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/ClassUnload/KeepAliveClassLoader.java	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test KeepAliveClassLoader
+ * @summary This test case uses a java.lang.ClassLoader instance to keep a class alive.
+ * @library /testlibrary /testlibrary/whitebox /runtime/testlibrary
+ * @library classes
+ * @build KeepAliveClassLoader test.Empty
+ * @build ClassUnloadCommon
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -Xmn8m -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI KeepAliveClassLoader
+ */
+
+import sun.hotspot.WhiteBox;
+
+/**
+ * Test that verifies that classes are not unloaded when specific types of references are kept to them.
+ */
+public class KeepAliveClassLoader {
+  private static final String className = "test.Empty";
+  private static final WhiteBox wb = WhiteBox.getWhiteBox();
+  public static Object escape = null;
+
+
+  public static void main(String... args) throws Exception {
+    ClassLoader cl = ClassUnloadCommon.newClassLoader();
+    Class<?> c = cl.loadClass(className);
+    Object o = c.newInstance();
+    o = null; c = null;
+    escape = cl;
+
+    {
+        boolean isAlive = wb.isClassAlive(className);
+        System.out.println("testClassLoader (1) alive: " + isAlive);
+
+        ClassUnloadCommon.failIf(!isAlive, "should be alive");
+    }
+    ClassUnloadCommon.triggerUnloading();
+
+    {
+        boolean isAlive = wb.isClassAlive(className);
+        System.out.println("testClassLoader (2) alive: " + isAlive);
+
+        ClassUnloadCommon.failIf(!isAlive, "should be alive");
+    }
+    cl = null;
+    escape = null;
+    ClassUnloadCommon.triggerUnloading();
+
+    {
+        boolean isAlive = wb.isClassAlive(className);
+        System.out.println("testClassLoader (3) alive: " + isAlive);
+        ClassUnloadCommon.failIf(isAlive, "should be unloaded");
+    }
+
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/ClassUnload/KeepAliveObject.java	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test KeepAliveObject
+ * @summary This test case uses a class instance to keep the class alive.
+ * @library /testlibrary /testlibrary/whitebox /runtime/testlibrary
+ * @library classes
+ * @build KeepAliveObject test.Empty
+ * @build ClassUnloadCommon
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -Xmn8m -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI KeepAliveObject
+ */
+
+import sun.hotspot.WhiteBox;
+
+/**
+ * Test that verifies that classes are not unloaded when specific types of references are kept to them.
+ */
+public class KeepAliveObject {
+  private static final String className = "test.Empty";
+  private static final WhiteBox wb = WhiteBox.getWhiteBox();
+  public static Object escape = null;
+
+  public static void main(String... args) throws Exception {
+    ClassLoader cl = ClassUnloadCommon.newClassLoader();
+    Class<?> c = cl.loadClass(className);
+    Object o = c.newInstance();
+    cl = null; c = null;
+    escape = o;
+
+    {
+        boolean isAlive = wb.isClassAlive(className);
+        System.out.println("testObject (1) alive: " + isAlive);
+
+        ClassUnloadCommon.failIf(!isAlive, "should be alive");
+    }
+
+    ClassUnloadCommon.triggerUnloading();
+
+    {
+        boolean isAlive = wb.isClassAlive(className);
+        System.out.println("testObject (2) alive: " + isAlive);
+
+        ClassUnloadCommon.failIf(!isAlive, "should be alive");
+    }
+    o = null;
+    escape = null;
+    ClassUnloadCommon.triggerUnloading();
+
+    {
+        boolean isAlive = wb.isClassAlive(className);
+        System.out.println("testObject (3) alive: " + isAlive);
+        ClassUnloadCommon.failIf(isAlive, "should be unloaded");
+    }
+
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/ClassUnload/KeepAliveSoftReference.java	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test KeepAliveSoftReference
+ * @summary This test case uses a java.lang.ref.SoftReference referencing a class instance to keep a class alive.
+ * @library /testlibrary /testlibrary/whitebox /runtime/testlibrary
+ * @library classes
+ * @build KeepAliveSoftReference test.Empty
+ * @build ClassUnloadCommon
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -Xmn8m -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI KeepAliveSoftReference
+ */
+
+import java.lang.ref.SoftReference;
+import sun.hotspot.WhiteBox;
+
+/**
+ * Test that verifies that classes are not unloaded when specific types of references are kept to them.
+ */
+public class KeepAliveSoftReference {
+  private static final String className = "test.Empty";
+  private static final WhiteBox wb = WhiteBox.getWhiteBox();
+
+  public static void main(String... args) throws Exception {
+    ClassLoader cl = ClassUnloadCommon.newClassLoader();
+    Class<?> c = cl.loadClass(className);
+    Object o = c.newInstance();
+    SoftReference<Object> sr = new SoftReference<>(o);
+    o = null; c = null; cl = null;
+
+    {
+        boolean isAlive = wb.isClassAlive(className);
+        System.out.println("testSoftReference (1) alive: " + isAlive);
+        boolean cleared = (sr.get() == null);
+        boolean shouldBeAlive = !cleared;
+        ClassUnloadCommon.failIf(isAlive != shouldBeAlive, "" + isAlive + " != " + shouldBeAlive);
+    }
+
+    ClassUnloadCommon.triggerUnloading();
+
+    {
+        boolean isAlive = wb.isClassAlive(className);
+        System.out.println("testSoftReference (2) alive: " + isAlive);
+        boolean cleared = (sr.get() == null);
+        boolean shouldBeAlive = !cleared;
+        ClassUnloadCommon.failIf(isAlive != shouldBeAlive, "" + isAlive + " != " + shouldBeAlive);
+    }
+    sr.clear();
+    ClassUnloadCommon.triggerUnloading();
+
+    {
+        boolean isAlive = wb.isClassAlive(className);
+        System.out.println("testSoftReference (3) alive: " + isAlive);
+        boolean cleared = (sr.get() == null);
+        boolean shouldBeAlive = !cleared;
+        ClassUnloadCommon.failIf(isAlive != shouldBeAlive, "" + isAlive + " != " + shouldBeAlive);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/ClassUnload/UnloadTest.java	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test UnloadTest
+ * @library /runtime/testlibrary /testlibrary /testlibrary/whitebox
+ * @library classes
+ * @build ClassUnloadCommon test.Empty
+ * @build UnloadTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -Xmn8m -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI UnloadTest
+ */
+import sun.hotspot.WhiteBox;
+
+/**
+ * Test that verifies that classes are unloaded when they are no longer reachable.
+ *
+ * The test creates a class loader, uses the loader to load a class and creates an instance
+ * of that class. Then it nulls out all the references to the instance, class and class loader
+ * and tries to trigger class unloading. Then it verifies that the class is no longer
+ * loaded by the VM.
+ */
+public class UnloadTest {
+    private static String className = "test.Empty";
+
+    public static void main(String... args) throws Exception {
+       run();
+    }
+
+    private static void run() throws Exception {
+        final WhiteBox wb = WhiteBox.getWhiteBox();
+
+        ClassUnloadCommon.failIf(wb.isClassAlive(className), "is not expected to be alive yet");
+
+        ClassLoader cl = ClassUnloadCommon.newClassLoader();
+        Class<?> c = cl.loadClass(className);
+        Object o = c.newInstance();
+
+        ClassUnloadCommon.failIf(!wb.isClassAlive(className), "should be live here");
+
+        cl = null; c = null; o = null;
+        ClassUnloadCommon.triggerUnloading();
+        ClassUnloadCommon.failIf(wb.isClassAlive(className), "should have been unloaded");
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/ClassUnload/classes/test/Empty.java	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,5 @@
+package test;
+
+public class Empty {
+  public String toString() { return "nothing"; }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/ReleaseCommittedMemory.java	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8013120
+ * @summary Release committed memory and make sure NMT handles it correctly
+ * @key nmt regression
+ * @library /testlibrary /testlibrary/whitebox
+ * @build ReleaseCommittedMemory
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ReleaseCommittedMemory
+ */
+
+import sun.hotspot.WhiteBox;
+
+public class ReleaseCommittedMemory {
+
+  public static void main(String args[]) throws Exception {
+    WhiteBox wb = WhiteBox.getWhiteBox();
+    long reserveSize = 256 * 1024;
+    long addr;
+
+    addr = wb.NMTReserveMemory(reserveSize);
+    wb.NMTCommitMemory(addr, 128*1024);
+    wb.NMTReleaseMemory(addr, reserveSize);
+    wb.NMTWaitForDataMerge();
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/testlibrary/ClassUnloadCommon.java	Wed Jul 05 18:55:08 2017 +0200
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.File;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+
+public class ClassUnloadCommon {
+    public static class TestFailure extends RuntimeException {
+        TestFailure(String msg) {
+            super(msg);
+        }
+    }
+
+    public static void failIf(boolean value, String msg) {
+        if (value) throw new TestFailure("Test failed: " + msg);
+    }
+
+    private static volatile Object dummy = null;
+    private static void allocateMemory(int kilobytes) {
+        ArrayList<byte[]> l = new ArrayList<>();
+        dummy = l;
+        for (int i = kilobytes; i > 0; i -= 1) {
+            l.add(new byte[1024]);
+        }
+        l = null;
+        dummy = null;
+    }
+
+    public static void triggerUnloading() {
+        allocateMemory(16 * 1024); // yg size is 8m with cms, force young collection
+        System.gc();
+    }
+
+    public static ClassLoader newClassLoader() {
+        try {
+            return new URLClassLoader(new URL[] {
+                Paths.get(System.getProperty("test.classes",".") + File.separatorChar + "classes").toUri().toURL(),
+            }, null);
+        } catch (MalformedURLException e){
+            throw new RuntimeException("Unexpected URL conversion failure", e);
+        }
+    }
+
+}
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Jul 05 18:54:28 2017 +0200
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Jul 05 18:55:08 2017 +0200
@@ -61,6 +61,9 @@
     registerNatives();
   }
 
+  // Arguments
+  public native void printHeapSizes();
+
   // Memory
   public native long getObjectAddress(Object o);
   public native int  getHeapOopSize();
--- a/jaxp/.hgtags	Wed Jul 05 18:54:28 2017 +0200
+++ b/jaxp/.hgtags	Wed Jul 05 18:55:08 2017 +0200
@@ -210,3 +210,4 @@
 ca71ec37b2efc9c3f0971ebabb3a6eb1213d76de jdk8-b86
 eddbc8ad2435a89f64729512337c9f2669e4dd85 jdk8-b87
 7122f7bb0fcc8a39e5254402119b2ee3fa0ad313 jdk8-b88
+893d2ba8bbea3a8d090e51d8eaea285b390789ea jdk8-b89
--- a/jaxws/.hgtags	Wed Jul 05 18:54:28 2017 +0200
+++ b/jaxws/.hgtags	Wed Jul 05 18:55:08 2017 +0200
@@ -210,3 +210,4 @@
 a5e7c2f093c9996ab3419db1565094a07b059e9c jdk8-b86
 72e03566f0a61282cc48ebc869803b256cccd66c jdk8-b87
 24fa5452e5d4e9df8b85196283275a6ca4b4adb4 jdk8-b88
+88838e08e4ef6a254867c8126070a1975683108d jdk8-b89
--- a/jdk/.hgtags	Wed Jul 05 18:54:28 2017 +0200
+++ b/jdk/.hgtags	Wed Jul 05 18:55:08 2017 +0200
@@ -210,3 +210,4 @@
 7989cd0cc3a9149864589438ee2c949015d8aa9a jdk8-b86
 d5228e624826a10ccc5b05f30ad8d839b58fe48d jdk8-b87
 8dbb4b159e04de3c447c9242c70505e71f8624c7 jdk8-b88
+845025546e35519fbb8970e79fc2a834063a5e19 jdk8-b89
--- a/jdk/src/share/classes/java/util/Base64.java	Wed Jul 05 18:54:28 2017 +0200
+++ b/jdk/src/share/classes/java/util/Base64.java	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/jdk/src/share/classes/java/util/StringJoiner.java	Wed Jul 05 18:54:28 2017 +0200
+++ b/jdk/src/share/classes/java/util/StringJoiner.java	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/jdk/test/java/lang/CharSequence/DefaultTest.java	Wed Jul 05 18:54:28 2017 +0200
+++ b/jdk/test/java/lang/CharSequence/DefaultTest.java	Wed Jul 05 18:55:08 2017 +0200
@@ -1,12 +1,10 @@
 /*
- * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
  *
  * This code is distributed in the hope that it will be useful, but WITHOUT
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/jdk/test/java/util/StringJoiner/StringJoinerTest.java	Wed Jul 05 18:54:28 2017 +0200
+++ b/jdk/test/java/util/StringJoiner/StringJoinerTest.java	Wed Jul 05 18:55:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/langtools/.hgtags	Wed Jul 05 18:54:28 2017 +0200
+++ b/langtools/.hgtags	Wed Jul 05 18:55:08 2017 +0200
@@ -210,3 +210,4 @@
 6ab578e141dfd17c4dc03869bb204aafa490c9f4 jdk8-b86
 1329f9c38d93c8caf339d7687df8371d06fe9e56 jdk8-b87
 a1e10f3adc47c8602a72e43a41403a642e73e0b1 jdk8-b88
+ec434cfd2752a7742c875c2fe7d556d8b81c0f3a jdk8-b89
--- a/nashorn/.hgtags	Wed Jul 05 18:54:28 2017 +0200
+++ b/nashorn/.hgtags	Wed Jul 05 18:55:08 2017 +0200
@@ -198,3 +198,4 @@
 002ad9d6735f36d1204e133324c73058c8abb1b0 jdk8-b86
 774aeaa89bc15f4365e3c2fc36f6a3a0da70ba28 jdk8-b87
 40c107d1ae6f81a62e35dfe618b827897405e9b2 jdk8-b88
+45ce27fbe2720d80070095c0db7344ec41e2833d jdk8-b89