Merge
author amurillo
Fri, 26 Sep 2014 01:40:31 -0700
changeset 26854 781be83089fb
parent 26786 f0c5e4b732da (current diff)
parent 26853 a6be7aadcf53 (diff)
child 26855 8af850ba9862
child 26910 253efabfd968
child 26911 8f2c7a83220f
child 26930 03e80d01349b
child 26932 33d6fa41d290
Merge
hotspot/make/jprt.properties
hotspot/src/share/vm/runtime/arguments_ext.cpp
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Fri Sep 26 01:40:31 2014 -0700
@@ -32,12 +32,10 @@
 import sun.jvm.hotspot.utilities.*;
 
 public class CodeCache {
-  private static AddressField       heapField;
-  private static AddressField       scavengeRootNMethodsField;
+  private static GrowableArray<CodeHeap> heapArray;
+  private static AddressField scavengeRootNMethodsField;
   private static VirtualConstructor virtualConstructor;
 
-  private CodeHeap heap;
-
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -49,7 +47,10 @@
   private static synchronized void initialize(TypeDataBase db) {
     Type type = db.lookupType("CodeCache");
 
-    heapField = type.getAddressField("_heap");
+    // Get array of CodeHeaps
+    AddressField heapsField = type.getAddressField("_heaps");
+    heapArray = GrowableArray.create(heapsField.getValue(), new StaticBaseConstructor<CodeHeap>(CodeHeap.class));
+
     scavengeRootNMethodsField = type.getAddressField("_scavenge_root_nmethods");
 
     virtualConstructor = new VirtualConstructor(db);
@@ -67,16 +68,17 @@
     }
   }
 
-  public CodeCache() {
-    heap = (CodeHeap) VMObjectFactory.newObject(CodeHeap.class, heapField.getValue());
-  }
-
   public NMethod scavengeRootMethods() {
     return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootNMethodsField.getValue());
   }
 
   public boolean contains(Address p) {
-    return getHeap().contains(p);
+    for (int i = 0; i < heapArray.length(); ++i) {
+      if (heapArray.at(i).contains(p)) {
+        return true;
+      }
+    }
+    return false;
   }
 
   /** When VM.getVM().isDebugging() returns true, this behaves like
@@ -97,14 +99,24 @@
 
   public CodeBlob findBlobUnsafe(Address start) {
     CodeBlob result = null;
+    CodeHeap containing_heap = null;
+    for (int i = 0; i < heapArray.length(); ++i) {
+      if (heapArray.at(i).contains(start)) {
+        containing_heap = heapArray.at(i);
+        break;
+      }
+    }
+    if (containing_heap == null) {
+      return null;
+    }
 
     try {
-      result = (CodeBlob) virtualConstructor.instantiateWrapperFor(getHeap().findStart(start));
+      result = (CodeBlob) virtualConstructor.instantiateWrapperFor(containing_heap.findStart(start));
     }
     catch (WrongTypeException wte) {
       Address cbAddr = null;
       try {
-        cbAddr = getHeap().findStart(start);
+        cbAddr = containing_heap.findStart(start);
       }
       catch (Exception findEx) {
         findEx.printStackTrace();
@@ -167,31 +179,32 @@
   }
 
   public void iterate(CodeCacheVisitor visitor) {
-    CodeHeap heap = getHeap();
-    Address ptr = heap.begin();
-    Address end = heap.end();
-
-    visitor.prologue(ptr, end);
+    visitor.prologue(lowBound(), highBound());
     CodeBlob lastBlob = null;
-    while (ptr != null && ptr.lessThan(end)) {
-      try {
-        // Use findStart to get a pointer inside blob other findBlob asserts
-        CodeBlob blob = findBlobUnsafe(heap.findStart(ptr));
-        if (blob != null) {
-          visitor.visit(blob);
-          if (blob == lastBlob) {
-            throw new InternalError("saw same blob twice");
+
+    for (int i = 0; i < heapArray.length(); ++i) {
+      CodeHeap current_heap = heapArray.at(i);
+      Address ptr = current_heap.begin();
+      while (ptr != null && ptr.lessThan(current_heap.end())) {
+        try {
+          // Use findStart to get a pointer inside blob other findBlob asserts
+          CodeBlob blob = findBlobUnsafe(current_heap.findStart(ptr));
+          if (blob != null) {
+            visitor.visit(blob);
+            if (blob == lastBlob) {
+              throw new InternalError("saw same blob twice");
+            }
+            lastBlob = blob;
           }
-          lastBlob = blob;
+        } catch (RuntimeException e) {
+          e.printStackTrace();
         }
-      } catch (RuntimeException e) {
-        e.printStackTrace();
+        Address next = current_heap.nextBlock(ptr);
+        if (next != null && next.lessThan(ptr)) {
+          throw new InternalError("pointer moved backwards");
+        }
+        ptr = next;
       }
-      Address next = heap.nextBlock(ptr);
-      if (next != null && next.lessThan(ptr)) {
-        throw new InternalError("pointer moved backwards");
-      }
-      ptr = next;
     }
     visitor.epilogue();
   }
@@ -200,7 +213,23 @@
   // Internals only below this point
   //
 
-  private CodeHeap getHeap() {
-    return heap;
+  private Address lowBound() {
+    Address low = heapArray.at(0).begin();
+    for (int i = 1; i < heapArray.length(); ++i) {
+      if (heapArray.at(i).begin().lessThan(low)) {
+        low = heapArray.at(i).begin();
+      }
+    }
+    return low;
+  }
+
+  private Address highBound() {
+    Address high = heapArray.at(0).end();
+    for (int i = 1; i < heapArray.length(); ++i) {
+      if (heapArray.at(i).end().greaterThan(high)) {
+        high = heapArray.at(i).end();
+      }
+    }
+    return high;
   }
 }
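
The SA CodeCache above now resolves addresses by scanning every CodeHeap in the _heaps GrowableArray instead of reading a single _heap field. As a hedged illustration only (not part of this changeset), a minimal SA client could use the updated contains()/findBlobUnsafe() pair as sketched below; it assumes an attached SA session so that VM initialization has already populated heapArray.

import sun.jvm.hotspot.code.CodeBlob;
import sun.jvm.hotspot.code.CodeCache;
import sun.jvm.hotspot.debugger.Address;

public class CodeCacheLookup {
  // Returns the CodeBlob covering 'pc', or null if no code heap contains it.
  public static CodeBlob blobFor(CodeCache cache, Address pc) {
    // contains() now loops over all code heaps, so one query still answers
    // "is this address anywhere in the code cache?"
    if (!cache.contains(pc)) {
      return null;
    }
    // findBlobUnsafe() first locates the containing heap, then asks that
    // heap's findStart() for the blob boundary.
    return cache.findBlobUnsafe(pc);
  }
}
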
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1Allocator.java	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,40 @@
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class G1Allocator extends VMObject {
+
+  //size_t _summary_bytes_used;
+  static private CIntegerField summaryBytesUsedField;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+      public void update(Observable o, Object data) {
+        initialize(VM.getVM().getTypeDataBase());
+      }
+    });
+  }
+
+  static private synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("G1Allocator");
+
+    summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+  }
+
+  public long getSummaryBytes() {
+    return summaryBytesUsedField.getValue(addr);
+  }
+
+  public G1Allocator(Address addr) {
+    super(addr);
+
+  }
+}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Fri Sep 26 01:40:31 2014 -0700
@@ -36,7 +36,6 @@
 import sun.jvm.hotspot.runtime.VM;
 import sun.jvm.hotspot.runtime.VMObjectFactory;
 import sun.jvm.hotspot.types.AddressField;
-import sun.jvm.hotspot.types.CIntegerField;
 import sun.jvm.hotspot.types.Type;
 import sun.jvm.hotspot.types.TypeDataBase;
 
@@ -47,8 +46,8 @@
     static private long hrmFieldOffset;
     // MemRegion _g1_reserved;
     static private long g1ReservedFieldOffset;
-    // size_t _summary_bytes_used;
-    static private CIntegerField summaryBytesUsedField;
+    // G1Allocator* _allocator
+    static private AddressField g1Allocator;
     // G1MonitoringSupport* _g1mm;
     static private AddressField g1mmField;
     // HeapRegionSet _old_set;
@@ -68,7 +67,7 @@
         Type type = db.lookupType("G1CollectedHeap");
 
         hrmFieldOffset = type.getField("_hrm").getOffset();
-        summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+        g1Allocator = type.getAddressField("_allocator");
         g1mmField = type.getAddressField("_g1mm");
         oldSetFieldOffset = type.getField("_old_set").getOffset();
         humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
@@ -79,7 +78,7 @@
     }
 
     public long used() {
-        return summaryBytesUsedField.getValue(addr);
+        return allocator().getSummaryBytes();
     }
 
     public long n_regions() {
@@ -97,6 +96,11 @@
         return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
     }
 
+    public G1Allocator allocator() {
+        Address g1AllocatorAddr = g1Allocator.getValue(addr);
+        return (G1Allocator) VMObjectFactory.newObject(G1Allocator.class, g1AllocatorAddr);
+    }
+
     public HeapRegionSetBase oldSet() {
         Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
         return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
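
With the change above, G1CollectedHeap.used() no longer reads _summary_bytes_used directly; it goes through the new allocator() accessor and G1Allocator.getSummaryBytes(). As a hedged sketch only (not part of the changeset), an SA client could exercise that delegation roughly as follows; it assumes an attached SA session where VM.getVM() is initialized and the target VM runs the G1 collector.

import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
import sun.jvm.hotspot.runtime.VM;

public class G1UsedBytes {
  // Reads the number of bytes G1 considers used in the Java heap.
  public static long usedBytes() {
    // When the target VM uses G1, Universe.heap() is a G1CollectedHeap
    // in the SA object model (assumption for this sketch).
    G1CollectedHeap g1 = (G1CollectedHeap) VM.getVM().getUniverse().heap();
    // used() forwards to G1Allocator's _summary_bytes_used via allocator().
    return g1.used();
  }
}
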
--- a/hotspot/make/bsd/makefiles/vm.make	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/make/bsd/makefiles/vm.make	Fri Sep 26 01:40:31 2014 -0700
@@ -234,10 +234,10 @@
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) vm.def
+mapfile : $(MAPFILE) vm.def mapfile_ext
 	rm -f $@
 	awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE")	\
-                 { system ("cat vm.def"); }		\
+                 { system ("cat mapfile_ext"); system ("cat vm.def"); } \
                else					\
                  { print $$0 }				\
              }' > $@ < $(MAPFILE)
@@ -249,6 +249,13 @@
 vm.def: $(Res_Files) $(Obj_Files)
 	sh $(GAMMADIR)/make/bsd/makefiles/build_vm_def.sh *.o > $@
 
+mapfile_ext:
+	rm -f $@
+	touch $@
+	if [ -f $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext ]; then \
+	  cat $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext > $@; \
+	fi
+
 STATIC_CXX = false
 
 ifeq ($(LINK_INTO),AOUT)
@@ -265,6 +272,8 @@
     LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/.
     LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/..
     LFLAGS_VM += -Xlinker -install_name -Xlinker @rpath/$(@F)
+  else
+    LFLAGS_VM                += -Wl,-z,defs
   endif
 
   # JVM is statically linked with libgcc[_s] and libstdc++; this is needed to
--- a/hotspot/make/excludeSrc.make	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/make/excludeSrc.make	Fri Sep 26 01:40:31 2014 -0700
@@ -21,6 +21,9 @@
 # questions.
 #
 #
+
+include $(GAMMADIR)/make/altsrc.make
+
 ifeq ($(INCLUDE_JVMTI), false)
       CXXFLAGS += -DINCLUDE_JVMTI=0
       CFLAGS += -DINCLUDE_JVMTI=0
@@ -78,12 +81,12 @@
       CXXFLAGS += -DINCLUDE_ALL_GCS=0
       CFLAGS += -DINCLUDE_ALL_GCS=0
 
-      gc_impl := $(GAMMADIR)/src/share/vm/gc_implementation
-      gc_exclude :=							\
-	$(notdir $(wildcard $(gc_impl)/concurrentMarkSweep/*.cpp))	\
-	$(notdir $(wildcard $(gc_impl)/g1/*.cpp))			\
-	$(notdir $(wildcard $(gc_impl)/parallelScavenge/*.cpp))		\
-	$(notdir $(wildcard $(gc_impl)/parNew/*.cpp))
+      gc_impl := $(HS_COMMON_SRC)/share/vm/gc_implementation
+      gc_impl_alt := $(HS_ALT_SRC)/share/vm/gc_implementation
+      gc_subdirs := concurrentMarkSweep g1 parallelScavenge parNew
+      gc_exclude := $(foreach gc,$(gc_subdirs),				\
+		     $(notdir $(wildcard $(gc_impl)/$(gc)/*.cpp))	\
+		     $(notdir $(wildcard $(gc_impl_alt)/$(gc)/*.cpp)))
       Src_Files_EXCLUDE += $(gc_exclude)
 
       # Exclude everything in $(gc_impl)/shared except the files listed
--- a/hotspot/make/jprt.properties	Wed Jul 05 20:02:40 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,388 +0,0 @@
-#
-# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# Properties for jprt
-
-# All build result bundles are full jdks.
-jprt.need.sibling.build=false
-
-# At submit time, the release supplied will be in jprt.submit.release
-#    and will be one of the official release names defined in jprt.
-#    jprt supports property value expansion using ${property.name} syntax.
-
-# This tells jprt what default release we want to build
-
-jprt.hotspot.default.release=jdk9
-
-jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
-
-# Disable syncing the source after builds and tests are done.
-
-jprt.sync.push=false
-
-# Note: we want both embedded releases and regular releases to build and test
-#       all platforms so that regressions are not introduced (eg. change to
-#       common code by SE breaks PPC/ARM; change to common code by SE-E breaks
-#       sparc etc.
-
-# Define the Solaris platforms we want for the various releases
-jprt.my.solaris.sparcv9.jdk9=solaris_sparcv9_5.11
-jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
-
-jprt.my.solaris.x64.jdk9=solaris_x64_5.11
-jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
-
-jprt.my.linux.i586.jdk9=linux_i586_2.6
-jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
-
-jprt.my.linux.x64.jdk9=linux_x64_2.6
-jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
-
-jprt.my.linux.ppc.jdk9=linux_ppc_2.6
-jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
-
-jprt.my.linux.ppcv2.jdk9=linux_ppcv2_2.6
-jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
-
-jprt.my.linux.armvfpsflt.jdk9=linux_armvfpsflt_2.6
-jprt.my.linux.armvfpsflt=${jprt.my.linux.armvfpsflt.${jprt.tools.default.release}}
-
-jprt.my.linux.armvfphflt.jdk9=linux_armvfphflt_2.6
-jprt.my.linux.armvfphflt=${jprt.my.linux.armvfphflt.${jprt.tools.default.release}}
-
-# The ARM GP vfp-sflt build is not currently supported
-#jprt.my.linux.armvs.jdk9=linux_armvs_2.6
-#jprt.my.linux.armvs=${jprt.my.linux.armvs.${jprt.tools.default.release}}
-
-jprt.my.linux.armvh.jdk9=linux_armvh_2.6
-jprt.my.linux.armvh=${jprt.my.linux.armvh.${jprt.tools.default.release}}
-
-jprt.my.linux.armsflt.jdk9=linux_armsflt_2.6
-jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
-
-jprt.my.macosx.x64.jdk9=macosx_x64_10.7
-jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
-
-jprt.my.windows.i586.jdk9=windows_i586_6.1
-jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
-
-jprt.my.windows.x64.jdk9=windows_x64_6.1
-jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
-
-# Standard list of jprt build targets for this source tree
-
-jprt.build.targets.standard= \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}, \
-    ${jprt.my.linux.i586}-{product|fastdebug}, \
-    ${jprt.my.linux.x64}-{product|fastdebug}, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}, \
-    ${jprt.my.windows.i586}-{product|fastdebug}, \
-    ${jprt.my.windows.x64}-{product|fastdebug}, \
-    ${jprt.my.linux.armvh}-{product|fastdebug}
-
-jprt.build.targets.open= \
-    ${jprt.my.solaris.x64}-{debugOpen}, \
-    ${jprt.my.linux.x64}-{productOpen}
-
-jprt.build.targets.embedded= \
-    ${jprt.my.linux.i586}-{productEmb|fastdebugEmb}, \
-    ${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \
-    ${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \
-    ${jprt.my.linux.armvfpsflt}-{productEmb|fastdebugEmb}, \
-    ${jprt.my.linux.armvfphflt}-{productEmb|fastdebugEmb}, \
-    ${jprt.my.linux.armsflt}-{productEmb|fastdebugEmb}
-
-jprt.build.targets.all=${jprt.build.targets.standard}, \
-    ${jprt.build.targets.embedded}, ${jprt.build.targets.open}
-
-jprt.build.targets.jdk9=${jprt.build.targets.all}
-jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
-
-# Subset lists of test targets for this source tree
-
-jprt.my.solaris.sparcv9.test.targets= \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark, \
-    ${jprt.my.solaris.sparcv9}-product-c2-runThese8, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_SerialGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_CMS, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_G1, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_SerialGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParallelGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParNewGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_CMS, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParOldGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_SerialGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParallelGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_CMS, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_G1, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParOldGC
-
-jprt.my.solaris.x64.test.targets= \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-scimark, \
-    ${jprt.my.solaris.x64}-product-c2-runThese8, \
-    ${jprt.my.solaris.x64}-product-c2-runThese8_Xcomp_lang, \
-    ${jprt.my.solaris.x64}-product-c2-runThese8_Xcomp_vm, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_G1, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_SerialGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC
-
-jprt.my.linux.i586.test.targets = \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-scimark, \
-    ${jprt.my.linux.i586}-product-c1-runThese8_Xcomp_lang, \
-    ${jprt.my.linux.i586}-product-c1-runThese8_Xcomp_vm, \
-    ${jprt.my.linux.i586}-fastdebug-c1-runThese8_Xshare, \
-    ${jprt.my.linux.i586}-fastdebug-c2-runThese8_Xcomp_lang, \
-    ${jprt.my.linux.i586}-fastdebug-c2-runThese8_Xcomp_vm, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
-    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_SerialGC, \
-    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParallelGC, \
-    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParNewGC, \
-    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_CMS, \
-    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_G1, \
-    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParOldGC, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_SerialGC, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParallelGC, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_CMS, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_G1, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParOldGC
-
-jprt.my.linux.x64.test.targets = \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_G1, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_CMS, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC
-
-jprt.my.macosx.x64.test.targets = \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-scimark, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_G1, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
-
-jprt.my.windows.i586.test.targets = \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-scimark, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-runThese8, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-runThese8_Xcomp_lang, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-runThese8_Xcomp_vm, \
-    ${jprt.my.windows.i586}-fastdebug-c1-runThese8_Xshare, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_SerialGC, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParallelGC, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParNewGC, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_CMS, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_G1, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParOldGC, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jbb_default, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParallelGC, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-jbb_CMS, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-jbb_G1, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParOldGC
-
-jprt.my.windows.x64.test.targets = \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark, \
-    ${jprt.my.windows.x64}-product-c2-runThese8, \
-    ${jprt.my.windows.x64}-product-c2-runThese8_Xcomp_lang, \
-    ${jprt.my.windows.x64}-product-c2-runThese8_Xcomp_vm, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_G1, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_CMS, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.windows.x64}-product-c2-jbb_CMS, \
-    ${jprt.my.windows.x64}-product-c2-jbb_ParallelGC, \
-    ${jprt.my.windows.x64}-product-c2-jbb_G1, \
-    ${jprt.my.windows.x64}-product-c2-jbb_ParOldGC
-
-# Some basic "smoke" tests for OpenJDK builds
-jprt.test.targets.open = \
-    ${jprt.my.solaris.x64}-{productOpen|fastdebugOpen}-c2-jvm98, \
-    ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98
-
-# Testing for actual embedded builds is different to standard
-jprt.my.linux.i586.test.targets.embedded = \
-    linux_i586_2.6-product-c1-scimark
-
-# The complete list of test targets for jprt
-# Note: no PPC or ARM tests at this stage
-
-jprt.test.targets.standard = \
-  ${jprt.my.linux.i586.test.targets.embedded}, \
-  ${jprt.my.solaris.sparcv9.test.targets}, \
-  ${jprt.my.solaris.x64.test.targets}, \
-  ${jprt.my.linux.i586.test.targets}, \
-  ${jprt.my.linux.x64.test.targets}, \
-  ${jprt.my.macosx.x64.test.targets}, \
-  ${jprt.my.windows.i586.test.targets}, \
-  ${jprt.my.windows.x64.test.targets}, \
-  ${jprt.test.targets.open}
-
-jprt.test.targets.embedded= 		\
-  ${jprt.my.linux.i586.test.targets.embedded}, \
-  ${jprt.my.solaris.sparcv9.test.targets}, \
-  ${jprt.my.solaris.x64.test.targets}, \
-  ${jprt.my.linux.x64.test.targets}, \
-  ${jprt.my.windows.i586.test.targets}, \
-  ${jprt.my.windows.x64.test.targets}
-
-jprt.test.targets.jdk9=${jprt.test.targets.standard}
-jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
-
-# The default test/Makefile targets that should be run
-
-#jprt.make.rule.test.targets=*-product-*-packtest
-
-jprt.make.rule.test.targets.standard.client = \
-  ${jprt.my.linux.i586}-*-c1-clienttest, \
-  ${jprt.my.windows.i586}-*-c1-clienttest
-
-jprt.make.rule.test.targets.standard.server = \
-  ${jprt.my.solaris.sparcv9}-*-c2-servertest, \
-  ${jprt.my.solaris.x64}-*-c2-servertest, \
-  ${jprt.my.linux.i586}-*-c2-servertest, \
-  ${jprt.my.linux.x64}-*-c2-servertest, \
-  ${jprt.my.macosx.x64}-*-c2-servertest, \
-  ${jprt.my.windows.i586}-*-c2-servertest, \
-  ${jprt.my.windows.x64}-*-c2-servertest
-
-jprt.make.rule.test.targets.standard.internalvmtests = \
-  ${jprt.my.solaris.sparcv9}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.macosx.x64}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
-
-jprt.make.rule.test.targets.standard.reg.group = \
-  ${jprt.my.solaris.sparcv9}-fastdebug-c2-GROUP, \
-  ${jprt.my.solaris.x64}-fastdebug-c2-GROUP, \
-  ${jprt.my.linux.i586}-fastdebug-c2-GROUP, \
-  ${jprt.my.linux.x64}-fastdebug-c2-GROUP, \
-  ${jprt.my.macosx.x64}-fastdebug-c2-GROUP, \
-  ${jprt.my.windows.i586}-fastdebug-c2-GROUP, \
-  ${jprt.my.windows.x64}-fastdebug-c2-GROUP, \
-  ${jprt.my.linux.i586}-fastdebug-c1-GROUP, \
-  ${jprt.my.windows.i586}-fastdebug-c1-GROUP
-
-jprt.make.rule.test.targets.standard = \
-  ${jprt.make.rule.test.targets.standard.client}, \
-  ${jprt.make.rule.test.targets.standard.server}, \
-  ${jprt.make.rule.test.targets.standard.internalvmtests}, \
-  ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_wbapitest}, \
-  ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_compiler}, \
-  ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_gc}, \
-  ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime}, \
-  ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime_closed}, \
-  ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_serviceability}
-
-jprt.make.rule.test.targets.embedded = \
-  ${jprt.make.rule.test.targets.standard.client}
-
-jprt.make.rule.test.targets.jdk9=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
-
-# 7155453: Work-around to prevent popups on OSX from blocking test completion
-# but the work-around is added to all platforms to be consistent
-jprt.jbb.options=-Djava.awt.headless=true
--- a/hotspot/make/linux/makefiles/vm.make	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/make/linux/makefiles/vm.make	Fri Sep 26 01:40:31 2014 -0700
@@ -227,10 +227,10 @@
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) vm.def
+mapfile : $(MAPFILE) vm.def mapfile_ext
 	rm -f $@
 	awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE")	\
-                 { system ("cat vm.def"); }		\
+                 { system ("cat mapfile_ext"); system ("cat vm.def"); } \
                else					\
                  { print $$0 }				\
              }' > $@ < $(MAPFILE)
@@ -242,6 +242,13 @@
 vm.def: $(Res_Files) $(Obj_Files)
 	sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
 
+mapfile_ext:
+	rm -f $@
+	touch $@
+	if [ -f $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext ]; then \
+	  cat $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext > $@; \
+	fi
+
 ifeq ($(JVM_VARIANT_ZEROSHARK), true)
   STATIC_CXX = false
 else
@@ -261,6 +268,7 @@
   LIBJVM_MAPFILE$(LDNOMAP) = mapfile_reorder
   LFLAGS_VM$(LDNOMAP)      += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE))
   LFLAGS_VM                += $(SONAMEFLAG:SONAME=$(LIBJVM))
+  LFLAGS_VM                += -Wl,-z,defs
 
   # JVM is statically linked with libgcc[_s] and libstdc++; this is needed to
   # get around library dependency and compatibility issues. Must use gcc not
--- a/hotspot/make/solaris/makefiles/buildtree.make	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/make/solaris/makefiles/buildtree.make	Fri Sep 26 01:40:31 2014 -0700
@@ -258,6 +258,8 @@
 	    echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
 	[ -n "$(ZIPEXE)" ] && \
 	    echo && echo "ZIPEXE = $(ZIPEXE)"; \
+	[ -n "$(HS_ALT_MAKE)" ] && \
+	    echo && echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \
 	[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
 	    echo && \
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
--- a/hotspot/make/solaris/makefiles/mapfile-vers-COMPILER1	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/make/solaris/makefiles/mapfile-vers-COMPILER1	Fri Sep 26 01:40:31 2014 -0700
@@ -29,7 +29,7 @@
 SUNWprivate_1.1 {
         global:
                 # Dtrace support
-                __1cJCodeCacheF_heap_;
+                __1cJCodeCacheG_heaps_;
                 __1cIUniverseO_collectedHeap_;
                 __1cGMethodG__vtbl_;
                 __1cHnmethodG__vtbl_;
--- a/hotspot/make/solaris/makefiles/mapfile-vers-COMPILER2	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/make/solaris/makefiles/mapfile-vers-COMPILER2	Fri Sep 26 01:40:31 2014 -0700
@@ -29,7 +29,7 @@
 SUNWprivate_1.1 {
         global:
                 # Dtrace support
-                __1cJCodeCacheF_heap_;
+                __1cJCodeCacheG_heaps_;
                 __1cIUniverseO_collectedHeap_;
                 __1cGMethodG__vtbl_;
                 __1cHnmethodG__vtbl_;
--- a/hotspot/make/solaris/makefiles/mapfile-vers-TIERED	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/make/solaris/makefiles/mapfile-vers-TIERED	Fri Sep 26 01:40:31 2014 -0700
@@ -29,7 +29,7 @@
 SUNWprivate_1.1 {
         global:
                 # Dtrace support
-                __1cJCodeCacheF_heap_;
+                __1cJCodeCacheG_heaps_;
                 __1cIUniverseO_collectedHeap_;
                 __1cGMethodG__vtbl_;
                 __1cHnmethodG__vtbl_;
--- a/hotspot/make/solaris/makefiles/vm.make	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/make/solaris/makefiles/vm.make	Fri Sep 26 01:40:31 2014 -0700
@@ -130,7 +130,7 @@
 # Not sure what the 'designed for' comment is referring too above.
 #   The order may not be too significant anymore, but I have placed this
 #   older libm before libCrun, just to make sure it's found and used first.
-LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc -ldemangle
+LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc -ldemangle -lnsl
 else
 ifeq ($(COMPILER_REV_NUMERIC), 502)
 # SC6.1 has it's own libm.so: specifying anything else provokes a name conflict.
@@ -249,11 +249,12 @@
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def
+mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def mapfile_ext
 	rm -f $@
 	cat $(MAPFILE) $(MAPFILE_DTRACE_OPT) \
 	    | $(NAWK) '{                                         \
 	              if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") {  \
+	                  system ("cat mapfile_ext");            \
 	                  system ("cat vm.def");                 \
 	              } else {                                   \
 	                  print $$0;                             \
@@ -267,6 +268,13 @@
 vm.def: $(Obj_Files)
 	sh $(GAMMADIR)/make/solaris/makefiles/build_vm_def.sh *.o > $@
 
+mapfile_ext:
+	rm -f $@
+	touch $@
+	if [ -f $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext ]; then \
+	  cat $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext > $@; \
+	fi
+
 ifeq ($(LINK_INTO),AOUT)
   LIBJVM.o                 =
   LIBJVM_MAPFILE           =
@@ -276,6 +284,7 @@
   LIBJVM_MAPFILE$(LDNOMAP) = mapfile_extended
   LFLAGS_VM$(LDNOMAP)      += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE))
   LFLAGS_VM                += $(SONAMEFLAG:SONAME=$(LIBJVM))
+  LFLAGS_VM                += -Wl,-z,defs
 ifndef USE_GCC
   LIBS_VM                  = $(LIBS)
 else
--- a/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -79,6 +79,9 @@
 
 define_pd_global(intx, InitialCodeCacheSize,         2048*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, ReservedCodeCacheSize,        256*M);
+define_pd_global(intx, NonProfiledCodeHeapSize,      125*M);
+define_pd_global(intx, ProfiledCodeHeapSize,         126*M);
+define_pd_global(intx, NonMethodCodeHeapSize,        5*M  );
 define_pd_global(intx, CodeCacheExpansionSize,       64*K);
 
 // Ergonomics related flags
--- a/hotspot/src/cpu/ppc/vm/frame_ppc.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/frame_ppc.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -308,3 +308,10 @@
   // unused... but returns fp() to minimize changes introduced by 7087445
   return fp();
 }
+
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) : _sp((intptr_t*)sp), _unextended_sp((intptr_t*)sp) {
+  find_codeblob_and_set_pc_and_deopt_state((address)pc); // also sets _fp and adjusts _unextended_sp
+}
+#endif
--- a/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -47,6 +47,9 @@
 define_pd_global(intx, FreqInlineSize,               325  );
 define_pd_global(bool, ResizeTLAB,                   true );
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
+define_pd_global(intx, NonProfiledCodeHeapSize,      13*M );
+define_pd_global(intx, ProfiledCodeHeapSize,         14*M );
+define_pd_global(intx, NonMethodCodeHeapSize,        5*M  );
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx, CodeCacheMinBlockLength,     1);
 define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
--- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -74,6 +74,9 @@
 // InitialCodeCacheSize derived from specjbb2000 run.
 define_pd_global(intx, InitialCodeCacheSize,         2048*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, ReservedCodeCacheSize,        48*M);
+define_pd_global(intx, NonProfiledCodeHeapSize,      21*M);
+define_pd_global(intx, ProfiledCodeHeapSize,         22*M);
+define_pd_global(intx, NonMethodCodeHeapSize,        5*M );
 define_pd_global(intx, CodeCacheExpansionSize,       64*K);
 
 // Ergonomics related flags
@@ -82,6 +85,9 @@
 // InitialCodeCacheSize derived from specjbb2000 run.
 define_pd_global(intx, InitialCodeCacheSize,         1536*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, ReservedCodeCacheSize,        32*M);
+define_pd_global(intx, NonProfiledCodeHeapSize,      13*M);
+define_pd_global(intx, ProfiledCodeHeapSize,         14*M);
+define_pd_global(intx, NonMethodCodeHeapSize,        5*M );
 define_pd_global(intx, CodeCacheExpansionSize,       32*K);
 // Ergonomics related flags
 define_pd_global(uint64_t,MaxRAM,                    4ULL*G);
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -343,7 +343,7 @@
 // constructors
 
 // Construct an unpatchable, deficient frame
-frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
+void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
 #ifdef _LP64
   assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
 #endif
@@ -365,6 +365,10 @@
 #endif // ASSERT
 }
 
+frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
+  init(sp, pc, cb);
+}
+
 frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
   _sp(sp),
   _younger_sp(younger_sp),
@@ -419,6 +423,13 @@
   }
 }
 
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) {
+  init((intptr_t*)sp, (address)pc, NULL);
+}
+#endif
+
 bool frame::is_interpreted_frame() const  {
   return Interpreter::contains(pc());
 }
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -163,6 +163,8 @@
   enum unpatchable_t { unpatchable };
   frame(intptr_t* sp, unpatchable_t, address pc = NULL, CodeBlob* cb = NULL);
 
+  void init(intptr_t* sp, address pc, CodeBlob* cb);
+
   // Walk from sp outward looking for old_sp, and return old_sp's predecessor
   // (i.e. return the sp from the frame where old_sp is the fp).
   // Register windows are assumed to be flushed for the stack in question.
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -1128,51 +1128,82 @@
     // Hoist any int/ptr/long's in the first 6 to int regs.
     // Hoist any flt/dbl's in the first 16 dbl regs.
     int j = 0;                  // Count of actual args, not HALVES
-    for( int i=0; i<total_args_passed; i++, j++ ) {
-      switch( sig_bt[i] ) {
+    VMRegPair param_array_reg;  // location of the argument in the parameter array
+    for (int i = 0; i < total_args_passed; i++, j++) {
+      param_array_reg.set_bad();
+      switch (sig_bt[i]) {
       case T_BOOLEAN:
       case T_BYTE:
       case T_CHAR:
       case T_INT:
       case T_SHORT:
-        regs[i].set1( int_stk_helper( j ) ); break;
+        regs[i].set1(int_stk_helper(j));
+        break;
       case T_LONG:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
+        assert(sig_bt[i+1] == T_VOID, "expecting half");
       case T_ADDRESS: // raw pointers, like current thread, for VM calls
       case T_ARRAY:
       case T_OBJECT:
       case T_METADATA:
-        regs[i].set2( int_stk_helper( j ) );
+        regs[i].set2(int_stk_helper(j));
         break;
       case T_FLOAT:
-        if ( j < 16 ) {
-          // V9ism: floats go in ODD registers
-          regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
-        } else {
-          // V9ism: floats go in ODD stack slot
-          regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
+        // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
+        // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
+        //
+        // "When a callee prototype exists, and does not indicate variable arguments,
+        // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
+        // will be promoted to floating-point registers"
+        //
+        // By "promoted" it means that the argument is located in two places, an unused
+        // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
+        // float register.  In most cases, there are 6 or fewer arguments of any type,
+        // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
+        // serve as shadow slots.  Per the spec floating point registers %d6 to %d16
+        // require slots beyond that (up to %sp+BIAS+248).
+        //
+        {
+          // V9ism: floats go in ODD registers and stack slots
+          int float_index = 1 + (j << 1);
+          param_array_reg.set1(VMRegImpl::stack2reg(float_index));
+          if (j < 16) {
+            regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
+          } else {
+            regs[i] = param_array_reg;
+          }
         }
         break;
       case T_DOUBLE:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
-        if ( j < 16 ) {
-          // V9ism: doubles go in EVEN/ODD regs
-          regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
-        } else {
-          // V9ism: doubles go in EVEN/ODD stack slots
-          regs[i].set2(VMRegImpl::stack2reg(j<<1));
+        {
+          assert(sig_bt[i + 1] == T_VOID, "expecting half");
+          // V9ism: doubles go in EVEN/ODD regs and stack slots
+          int double_index = (j << 1);
+          param_array_reg.set2(VMRegImpl::stack2reg(double_index));
+          if (j < 16) {
+            regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
+          } else {
+            // V9ism: doubles go in EVEN/ODD stack slots
+            regs[i] = param_array_reg;
+          }
         }
         break;
-      case T_VOID:  regs[i].set_bad(); j--; break; // Do not count HALVES
+      case T_VOID:
+        regs[i].set_bad();
+        j--;
+        break; // Do not count HALVES
       default:
         ShouldNotReachHere();
       }
-      if (regs[i].first()->is_stack()) {
-        int off =  regs[i].first()->reg2stack();
+      // Keep track of the deepest parameter array slot.
+      if (!param_array_reg.first()->is_valid()) {
+        param_array_reg = regs[i];
+      }
+      if (param_array_reg.first()->is_stack()) {
+        int off = param_array_reg.first()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
-      if (regs[i].second()->is_stack()) {
-        int off =  regs[i].second()->reg2stack();
+      if (param_array_reg.second()->is_stack()) {
+        int off = param_array_reg.second()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
     }
@@ -1180,8 +1211,8 @@
 #else // _LP64
     // V8 convention: first 6 things in O-regs, rest on stack.
     // Alignment is willy-nilly.
-    for( int i=0; i<total_args_passed; i++ ) {
-      switch( sig_bt[i] ) {
+    for (int i = 0; i < total_args_passed; i++) {
+      switch (sig_bt[i]) {
       case T_ADDRESS: // raw pointers, like current thread, for VM calls
       case T_ARRAY:
       case T_BOOLEAN:
@@ -1192,23 +1223,23 @@
       case T_OBJECT:
       case T_METADATA:
       case T_SHORT:
-        regs[i].set1( int_stk_helper( i ) );
+        regs[i].set1(int_stk_helper(i));
         break;
       case T_DOUBLE:
       case T_LONG:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
-        regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
+        assert(sig_bt[i + 1] == T_VOID, "expecting half");
+        regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
         break;
       case T_VOID: regs[i].set_bad(); break;
       default:
         ShouldNotReachHere();
       }
       if (regs[i].first()->is_stack()) {
-        int off =  regs[i].first()->reg2stack();
+        int off = regs[i].first()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
       if (regs[i].second()->is_stack()) {
-        int off =  regs[i].second()->reg2stack();
+        int off = regs[i].second()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
     }
@@ -1357,11 +1388,10 @@
     const Register rOop = src.first()->as_Register();
     const Register rHandle = L5;
     int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
-    int offset = oop_slot*VMRegImpl::stack_slot_size;
-    Label skip;
+    int offset = oop_slot * VMRegImpl::stack_slot_size;
     __ st_ptr(rOop, SP, offset + STACK_BIAS);
     if (is_receiver) {
-      *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
+       *receiver_offset = offset;
     }
     map->set_oop(VMRegImpl::stack2reg(oop_slot));
     __ add(SP, offset + STACK_BIAS, rHandle);
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Fri Sep 26 01:40:31 2014 -0700
@@ -1989,7 +1989,7 @@
 // to implement the UseStrictFP mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = false;
 
-// Are floats conerted to double when stored to stack during deoptimization?
+// Are floats converted to double when stored to stack during deoptimization?
 // Sparc does not handle callee-save floats.
 bool Matcher::float_in_double() { return false; }
 
@@ -3218,7 +3218,7 @@
 //         are owned by the CALLEE.  Holes should not be nessecary in the
 //         incoming area, as the Java calling convention is completely under
 //         the control of the AD file.  Doubles can be sorted and packed to
-//         avoid holes.  Holes in the outgoing arguments may be nessecary for
+//         avoid holes.  Holes in the outgoing arguments may be necessary for
 //         varargs C calling conventions.
 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 //         even aligned with pad0 as needed.
@@ -3284,7 +3284,7 @@
   %}
 
   // Body of function which returns an OptoRegs array locating
-  // arguments either in registers or in stack slots for callin
+  // arguments either in registers or in stack slots for calling
   // C.
   c_calling_convention %{
     // This is obviously always outgoing
--- a/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -47,6 +47,9 @@
 define_pd_global(intx, NewSizeThreadIncrease,        4*K  );
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
+define_pd_global(intx, NonProfiledCodeHeapSize,      13*M );
+define_pd_global(intx, ProfiledCodeHeapSize,         14*M );
+define_pd_global(intx, NonMethodCodeHeapSize,        5*M  );
 define_pd_global(bool, ProfileInterpreter,           false);
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx, CodeCacheMinBlockLength,     1);
--- a/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -84,6 +84,9 @@
 define_pd_global(bool, OptoBundling,                 false);
 
 define_pd_global(intx, ReservedCodeCacheSize,        48*M);
+define_pd_global(intx, NonProfiledCodeHeapSize,      21*M);
+define_pd_global(intx, ProfiledCodeHeapSize,         22*M);
+define_pd_global(intx, NonMethodCodeHeapSize,        5*M );
 define_pd_global(uintx, CodeCacheMinBlockLength,     4);
 define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
 
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -715,3 +715,10 @@
   assert(! is_compiled_frame(), "unknown compiled frame size");
   return fp();
 }
+
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) {
+  init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
+}
+#endif
--- a/hotspot/src/cpu/x86/vm/frame_x86.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/frame_x86.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -187,6 +187,8 @@
 
   frame(intptr_t* sp, intptr_t* fp);
 
+  void init(intptr_t* sp, intptr_t* fp, address pc);
+
   // accessors for the instance variables
   // Note: not necessarily the real 'frame pointer' (see real_fp)
   intptr_t*   fp() const { return _fp; }
--- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -41,7 +41,7 @@
   _deopt_state = unknown;
 }
 
-inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
+inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
   _sp = sp;
   _unextended_sp = sp;
   _fp = fp;
@@ -59,6 +59,10 @@
   }
 }
 
+inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
+  init(sp, fp, pc);
+}
+
 inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
   _sp = sp;
   _unextended_sp = unextended_sp;
--- a/hotspot/src/cpu/zero/vm/frame_zero.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/zero/vm/frame_zero.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -438,3 +438,10 @@
   // unused... but returns fp() to minimize changes introduced by 7087445
   return fp();
 }
+
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) {
+  Unimplemented();
+}
+#endif
--- a/hotspot/src/cpu/zero/vm/shark_globals_zero.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/cpu/zero/vm/shark_globals_zero.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -53,6 +53,9 @@
 define_pd_global(intx,     NewSizeThreadIncrease,        4*K  );
 define_pd_global(intx,     InitialCodeCacheSize,         160*K);
 define_pd_global(intx,     ReservedCodeCacheSize,        32*M );
+define_pd_global(intx,     NonProfiledCodeHeapSize,      13*M );
+define_pd_global(intx,     ProfiledCodeHeapSize,         14*M );
+define_pd_global(intx,     NonMethodCodeHeapSize,        5*M  );
 define_pd_global(bool,     ProfileInterpreter,           false);
 define_pd_global(intx,     CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx,    CodeCacheMinBlockLength,      1    );
--- a/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -67,7 +67,7 @@
  * we link this program with -z nodefs .
  *
  * But for 'debug1' and 'fastdebug1' we still have to provide
- * a particular workaround for the following symbols bellow.
+ * a particular workaround for the following symbols below.
  * It will be good to find out a generic way in the future.
  */
 
@@ -87,21 +87,24 @@
 #endif /* ASSERT */
 #endif /* COMPILER1 */
 
-#define GEN_OFFS(Type,Name)                             \
+#define GEN_OFFS_NAME(Type,Name,OutputType)             \
   switch(gen_variant) {                                 \
   case GEN_OFFSET:                                      \
-    printf("#define OFFSET_%-33s %ld\n",                 \
-           #Type #Name, offset_of(Type, Name)); \
+    printf("#define OFFSET_%-33s %ld\n",                \
+            #OutputType #Name, offset_of(Type, Name));  \
     break;                                              \
   case GEN_INDEX:                                       \
     printf("#define IDX_OFFSET_%-33s %d\n",             \
-            #Type #Name, index++);                      \
+            #OutputType #Name, index++);                \
     break;                                              \
   case GEN_TABLE:                                       \
-    printf("\tOFFSET_%s,\n", #Type #Name);              \
+    printf("\tOFFSET_%s,\n", #OutputType #Name);        \
     break;                                              \
   }
 
+#define GEN_OFFS(Type,Name)                             \
+  GEN_OFFS_NAME(Type,Name,Type)
+
 #define GEN_SIZE(Type)                                  \
   switch(gen_variant) {                                 \
   case GEN_OFFSET:                                      \
@@ -246,6 +249,11 @@
   GEN_OFFS(VirtualSpace, _high);
   printf("\n");
 
+  /* We need to use different names here because of the template parameter */
+  GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _data, GrowableArray_CodeHeap);
+  GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _len, GrowableArray_CodeHeap);
+  printf("\n");
+
   GEN_OFFS(CodeBlob, _name);
   GEN_OFFS(CodeBlob, _header_size);
   GEN_OFFS(CodeBlob, _content_offset);
--- a/hotspot/src/os/bsd/dtrace/jhelper.d	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/os/bsd/dtrace/jhelper.d	Fri Sep 26 01:40:31 2014 -0700
@@ -43,7 +43,9 @@
 
 extern pointer __JvmOffsets;
 
-extern pointer __1cJCodeCacheF_heap_;
+/* GrowableArray<CodeHeaps*>* */
+extern pointer __1cJCodeCacheG_heaps_;
+
 extern pointer __1cIUniverseO_collectedHeap_;
 
 extern pointer __1cHnmethodG__vtbl_;
@@ -95,8 +97,8 @@
 /!init_done && !this->done/
 {
   MARK_LINE;
-  init_done = 1;
 
+  copyin_offset(POINTER_SIZE);
   copyin_offset(COMPILER);
   copyin_offset(OFFSET_CollectedHeap_reserved);
   copyin_offset(OFFSET_MemRegion_start);
@@ -122,6 +124,9 @@
   copyin_offset(OFFSET_CodeHeap_segmap);
   copyin_offset(OFFSET_CodeHeap_log2_segment_size);
 
+  copyin_offset(OFFSET_GrowableArray_CodeHeap_data);
+  copyin_offset(OFFSET_GrowableArray_CodeHeap_len);
+
   copyin_offset(OFFSET_VirtualSpace_low);
   copyin_offset(OFFSET_VirtualSpace_high);
 
@@ -152,26 +157,14 @@
 #error "Don't know architecture"
 #endif
 
-  this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
-
-  /* Reading volatile values */
-  this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address + 
-      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
-
-  this->CodeCache_high = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+  /* Read address of GrowableArray<CodeHeap*> */
+  this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_);
+  /* Read address of _data array field in GrowableArray */
+  this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data);
+  this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len);
 
-  this->CodeCache_segmap_low = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
-
-  this->CodeCache_segmap_high = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_high);
-
-  this->CodeHeap_log2_segment_size = copyin_uint32(
-      this->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size);
-
-  this->Method_vtbl             = (pointer) &``__1cNMethodG__vtbl_;
-
+  this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
+  
   /*
    * Get Java heap bounds
    */
@@ -187,21 +180,152 @@
   this->heap_end = this->heap_start + this->heap_size;
 }
 
+/*
+ * IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in 
+ * the code cache. If more code heaps are added the following probes have to 
+ * be extended. This is done by simply adding a probe to get the heap bounds
+ * and another probe to set the code heap address of the newly created heap.
+ */
+
+/*
+ * ----- BEGIN: Get bounds of code heaps -----
+ */
 dtrace:helper:ustack:
-/!this->done &&
-this->CodeCache_low <= this->pc && this->pc < this->CodeCache_high/
+/init_done < 1 && this->number_of_heaps >= 1 && !this->done/
+{
+  MARK_LINE;
+  /* CodeHeap 1 */
+  init_done = 1;
+  this->code_heap1_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap1_low = copyin_ptr(this->code_heap1_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap1_high = copyin_ptr(this->code_heap1_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 2 && this->number_of_heaps >= 2 && !this->done/
+{
+  MARK_LINE;
+  /* CodeHeap 2 */
+  init_done = 2;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap2_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap2_low = copyin_ptr(this->code_heap2_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap2_high = copyin_ptr(this->code_heap2_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 3 && this->number_of_heaps >= 3 && !this->done/
+{
+  /* CodeHeap 3 */
+  init_done = 3;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap3_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap3_low = copyin_ptr(this->code_heap3_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap3_high = copyin_ptr(this->code_heap3_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 4 && this->number_of_heaps >= 4 && !this->done/
+{
+  /* CodeHeap 4 */
+  init_done = 4;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap4_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap4_low = copyin_ptr(this->code_heap4_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap4_high = copyin_ptr(this->code_heap4_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 5 && this->number_of_heaps >= 5 && !this->done/
+{
+  /* CodeHeap 5 */
+  init_done = 5;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap5_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap5_low = copyin_ptr(this->code_heap5_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap5_high = copyin_ptr(this->code_heap5_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+/*
+ * ----- END: Get bounds of code heaps -----
+ */
+
+/*
+ * ----- BEGIN: Get address of the code heap pc points to -----
+ */
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 1 && this->code_heap1_low <= this->pc && this->pc < this->code_heap1_high/
 {
   MARK_LINE;
   this->codecache = 1;
+  this->code_heap_address = this->code_heap1_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 2 && this->code_heap2_low <= this->pc && this->pc < this->code_heap2_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap2_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 3 && this->code_heap3_low <= this->pc && this->pc < this->code_heap3_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap3_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 4 && this->code_heap4_low <= this->pc && this->pc < this->code_heap4_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap4_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 5 && this->code_heap5_low <= this->pc && this->pc < this->code_heap5_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap5_address;
+}
+/*
+ * ----- END: Get address of the code heap pc points to -----
+ */
+
+dtrace:helper:ustack:
+/!this->done && this->codecache/
+{
+  MARK_LINE;
+  /* 
+   * Get code heap configuration
+   */
+  this->code_heap_low = copyin_ptr(this->code_heap_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap_segmap_low = copyin_ptr(this->code_heap_address +
+      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
+  this->code_heap_log2_segment_size = copyin_uint32(
+      this->code_heap_address + OFFSET_CodeHeap_log2_segment_size);
 
   /*
-   * Find start.
+   * Find start
    */
-  this->segment = (this->pc - this->CodeCache_low) >>
-    this->CodeHeap_log2_segment_size;
-  this->block = this->CodeCache_segmap_low;
+  this->segment = (this->pc - this->code_heap_low) >>
+    this->code_heap_log2_segment_size;
+  this->block = this->code_heap_segmap_low;
   this->tag = copyin_uchar(this->block + this->segment);
-  "second";
 }
 
 dtrace:helper:ustack:
@@ -256,8 +380,8 @@
 /!this->done && this->codecache/
 {
   MARK_LINE;
-  this->block = this->CodeCache_low +
-    (this->segment << this->CodeHeap_log2_segment_size);
+  this->block = this->code_heap_low +
+    (this->segment << this->code_heap_log2_segment_size);
   this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used);
 }
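
DTrace helper actions cannot loop, which is why the probes above are unrolled once per supported code heap (currently five). As a reading aid, here is a hedged C++ sketch, not part of the patch, of the lookup those probes perform; the struct and function names are invented for illustration.

    #include <cstdint>

    // Invented names: one entry per code heap, filled the same way as the
    // code_heapN_address/low/high variables in the unrolled probes.
    struct CodeHeapBounds {
      uintptr_t address;  // CodeHeap*
      uintptr_t low;      // _memory._low
      uintptr_t high;     // _memory._high
    };

    // Returns the CodeHeap whose [low, high) range contains pc, or 0 if none does.
    uintptr_t find_code_heap(const CodeHeapBounds* heaps, int number_of_heaps,
                             uintptr_t pc) {
      for (int i = 0; i < number_of_heaps; ++i) {
        if (heaps[i].low <= pc && pc < heaps[i].high) {
          return heaps[i].address;
        }
      }
      return 0;
    }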
 
--- a/hotspot/src/os/bsd/dtrace/libjvm_db.c	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/os/bsd/dtrace/libjvm_db.c	Fri Sep 26 01:40:31 2014 -0700
@@ -150,16 +150,18 @@
   uint64_t Use_Compressed_Oops_address;
   uint64_t Universe_narrow_oop_base_address;
   uint64_t Universe_narrow_oop_shift_address;
-  uint64_t CodeCache_heap_address;
+  uint64_t CodeCache_heaps_address;
 
   /* Volatiles */
   uint8_t  Use_Compressed_Oops;
   uint64_t Universe_narrow_oop_base;
   uint32_t Universe_narrow_oop_shift;
-  uint64_t CodeCache_low;
-  uint64_t CodeCache_high;
-  uint64_t CodeCache_segmap_low;
-  uint64_t CodeCache_segmap_high;
+  // Code cache heaps
+  int32_t  Number_of_heaps;
+  uint64_t* Heap_low;
+  uint64_t* Heap_high;
+  uint64_t* Heap_segmap_low;
+  uint64_t* Heap_segmap_high;
 
   int32_t  SIZE_CodeCache_log2_segment;
 
@@ -278,8 +280,9 @@
     }
 
     if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) {
-      if (strcmp("_heap", vmp->fieldName) == 0) {
-        err = read_pointer(J, vmp->address, &J->CodeCache_heap_address);
+      /* Read _heaps field of type GrowableArray<CodeHeap*>*      */
+      if (strcmp("_heaps", vmp->fieldName) == 0) {
+        err = read_pointer(J, vmp->address, &J->CodeCache_heaps_address);
       }
     } else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) {
       if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
@@ -318,7 +321,9 @@
 }
 
 static int read_volatiles(jvm_agent_t* J) {
-  uint64_t ptr;
+  int i;
+  uint64_t array_data;
+  uint64_t code_heap_address;
   int err;
 
   err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
@@ -334,20 +339,43 @@
   err = ps_pread(J->P,  J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
   CHECK_FAIL(err);
 
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
-                     OFFSET_VirtualSpace_low, &J->CodeCache_low);
-  CHECK_FAIL(err);
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
-                     OFFSET_VirtualSpace_high, &J->CodeCache_high);
-  CHECK_FAIL(err);
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
-                     OFFSET_VirtualSpace_low, &J->CodeCache_segmap_low);
-  CHECK_FAIL(err);
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
-                     OFFSET_VirtualSpace_high, &J->CodeCache_segmap_high);
-  CHECK_FAIL(err);
+  /* CodeCache_heaps_address points to GrowableArray<CodeHeap*>, read _data field
+     pointing to the first entry of type CodeHeap* in the array */
+  err = read_pointer(J, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_data, &array_data);
+  /* Read _len field containing the number of code heaps */
+  err = ps_pread(J->P, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_len,
+                 &J->Number_of_heaps, sizeof(J->Number_of_heaps));
+
+  /* Allocate memory for heap configurations */
+  J->Heap_low = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+  J->Heap_high = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+  J->Heap_segmap_low = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+  J->Heap_segmap_high = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+
+  /* Read code heap configurations */
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    /* Read address of heap */
+    err = read_pointer(J, array_data, &code_heap_address);
+    CHECK_FAIL(err);
 
-  err = ps_pread(J->P, J->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size,
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
+                       OFFSET_VirtualSpace_low, &J->Heap_low[i]);
+    CHECK_FAIL(err);
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
+                       OFFSET_VirtualSpace_high, &J->Heap_high[i]);
+    CHECK_FAIL(err);
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
+                       OFFSET_VirtualSpace_low, &J->Heap_segmap_low[i]);
+    CHECK_FAIL(err);
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
+                       OFFSET_VirtualSpace_high, &J->Heap_segmap_high[i]);
+    CHECK_FAIL(err);
+
+    /* Increment pointer to next entry */
+    array_data = array_data + POINTER_SIZE;
+  }
+
+  err = ps_pread(J->P, code_heap_address + OFFSET_CodeHeap_log2_segment_size,
                  &J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment));
   CHECK_FAIL(err);
 
@@ -357,46 +385,57 @@
   return err;
 }
 
-
-static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
-  /* make sure the code cache is up to date */
-  return (J->CodeCache_low <= ptr && ptr < J->CodeCache_high);
+static int codeheap_contains(int heap_num, jvm_agent_t* J, uint64_t ptr) {
+  return (J->Heap_low[heap_num] <= ptr && ptr < J->Heap_high[heap_num]);
 }
 
-static uint64_t segment_for(jvm_agent_t* J, uint64_t p) {
-  return (p - J->CodeCache_low) >> J->SIZE_CodeCache_log2_segment;
+static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
+  int i;
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    if (codeheap_contains(i, J, ptr)) {
+      return 1;
+    }
+  }
+  return 0;
 }
 
-static uint64_t block_at(jvm_agent_t* J, int i) {
-  return J->CodeCache_low + (i << J->SIZE_CodeCache_log2_segment);
+static uint64_t segment_for(int heap_num, jvm_agent_t* J, uint64_t p) {
+  return (p - J->Heap_low[heap_num]) >> J->SIZE_CodeCache_log2_segment;
+}
+
+static uint64_t block_at(int heap_num, jvm_agent_t* J, int i) {
+  return J->Heap_low[heap_num] + (i << J->SIZE_CodeCache_log2_segment);
 }
 
 static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) {
   int err;
+  int i;
 
-  *startp = 0;
-  if (J->CodeCache_low <= ptr && ptr < J->CodeCache_high) {
-    int32_t used;
-    uint64_t segment = segment_for(J, ptr);
-    uint64_t block = J->CodeCache_segmap_low;
-    uint8_t tag;
-    err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
-    CHECK_FAIL(err);
-    if (tag == 0xff)
-      return PS_OK;
-    while (tag > 0) {
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    *startp = 0;
+    if (codeheap_contains(i, J, ptr)) {
+      int32_t used;
+      uint64_t segment = segment_for(i, J, ptr);
+      uint64_t block = J->Heap_segmap_low[i];
+      uint8_t tag;
       err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
       CHECK_FAIL(err);
-      segment -= tag;
+      if (tag == 0xff)
+        return PS_OK;
+      while (tag > 0) {
+        err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
+        CHECK_FAIL(err);
+        segment -= tag;
+      }
+      block = block_at(i, J, segment);
+      err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
+      CHECK_FAIL(err);
+      if (used) {
+        *startp = block + SIZE_HeapBlockHeader;
+      }
     }
-    block = block_at(J, segment);
-    err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
-    CHECK_FAIL(err);
-    if (used) {
-      *startp = block + SIZE_HeapBlockHeader;
-    }
+    return PS_OK;
   }
-  return PS_OK;
 
  fail:
   return -1;
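
The reworked find_start() above walks one heap's segment map to locate the block header that precedes ptr. The following is a hedged sketch, not part of the patch, of that walk with the ps_pread() calls replaced by direct reads from a local copy of the segment map; everything except the meaning of the offsets is invented for illustration.

    #include <cstdint>

    // segmap[i] == 0 marks the segment that starts a block, 0xff marks a free
    // segment, and any other value is a hop (in segments) towards the start.
    uint64_t block_start_for(uint64_t ptr, uint64_t heap_low,
                             const uint8_t* segmap, int log2_segment_size) {
      uint64_t segment = (ptr - heap_low) >> log2_segment_size;
      uint8_t tag = segmap[segment];
      if (tag == 0xff) {
        return 0;                       // ptr falls into a free segment
      }
      while (tag > 0) {
        segment -= tag;                 // hop back towards the block header
        tag = segmap[segment];
      }
      return heap_low + (segment << log2_segment_size);  // block header address
    }

In the patched find_start() the caller then reads the block's 'used' flag and, when it is set, returns block + SIZE_HeapBlockHeader as the start address.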
--- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -82,21 +82,24 @@
 #endif /* ASSERT */
 #endif /* COMPILER1 */
 
-#define GEN_OFFS(Type,Name)                             \
+#define GEN_OFFS_NAME(Type,Name,OutputType)             \
   switch(gen_variant) {                                 \
   case GEN_OFFSET:                                      \
     printf("#define OFFSET_%-33s %d\n",                 \
-            #Type #Name, offset_of(Type, Name));        \
+            #OutputType #Name, offset_of(Type, Name));  \
     break;                                              \
   case GEN_INDEX:                                       \
     printf("#define IDX_OFFSET_%-33s %d\n",             \
-            #Type #Name, index++);                      \
+            #OutputType #Name, index++);                \
     break;                                              \
   case GEN_TABLE:                                       \
-    printf("\tOFFSET_%s,\n", #Type #Name);              \
+    printf("\tOFFSET_%s,\n", #OutputType #Name);        \
     break;                                              \
   }
 
+#define GEN_OFFS(Type,Name)                             \
+  GEN_OFFS_NAME(Type,Name,Type)
+
 #define GEN_SIZE(Type)                                  \
   switch(gen_variant) {                                 \
   case GEN_OFFSET:                                      \
@@ -241,6 +244,11 @@
   GEN_OFFS(VirtualSpace, _high);
   printf("\n");
 
+  /* We need to use different names here because of the template parameter */
+  GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _data, GrowableArray_CodeHeap);
+  GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _len, GrowableArray_CodeHeap);
+  printf("\n");
+
   GEN_OFFS(CodeBlob, _name);
   GEN_OFFS(CodeBlob, _header_size);
   GEN_OFFS(CodeBlob, _content_offset);
--- a/hotspot/src/os/solaris/dtrace/jhelper.d	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/os/solaris/dtrace/jhelper.d	Fri Sep 26 01:40:31 2014 -0700
@@ -43,7 +43,9 @@
 
 extern pointer __JvmOffsets;
 
-extern pointer __1cJCodeCacheF_heap_;
+/* GrowableArray<CodeHeap*>* */
+extern pointer __1cJCodeCacheG_heaps_;
+
 extern pointer __1cIUniverseO_collectedHeap_;
 
 extern pointer __1cHnmethodG__vtbl_;
@@ -95,8 +97,8 @@
 /!init_done && !this->done/
 {
   MARK_LINE;
-  init_done = 1;
-
+  
+  copyin_offset(POINTER_SIZE);
   copyin_offset(COMPILER);
   copyin_offset(OFFSET_CollectedHeap_reserved);
   copyin_offset(OFFSET_MemRegion_start);
@@ -122,6 +124,9 @@
   copyin_offset(OFFSET_CodeHeap_segmap);
   copyin_offset(OFFSET_CodeHeap_log2_segment_size);
 
+  copyin_offset(OFFSET_GrowableArray_CodeHeap_data);
+  copyin_offset(OFFSET_GrowableArray_CodeHeap_len);
+
   copyin_offset(OFFSET_VirtualSpace_low);
   copyin_offset(OFFSET_VirtualSpace_high);
 
@@ -152,24 +157,13 @@
 #error "Don't know architecture"
 #endif
 
-  this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
-
-  this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address + 
-      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
-
-  this->CodeCache_high = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+  /* Read address of GrowableArray<CodeHeap*> */
+  this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_);
+  /* Read address of _data array field in GrowableArray */
+  this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data);
+  this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len);
 
-  this->CodeCache_segmap_low = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
-
-  this->CodeCache_segmap_high = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_high);
-
-  this->CodeHeap_log2_segment_size = copyin_uint32(
-      this->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size);
-
-  this->Method_vtbl             = (pointer) &``__1cGMethodG__vtbl_;
+  this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
 
   /*
    * Get Java heap bounds
@@ -186,21 +180,152 @@
   this->heap_end = this->heap_start + this->heap_size;
 }
 
+/*
+ * IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in 
+ * the code cache. If more code heaps are added the following probes have to 
+ * be extended. This is done by simply adding a probe to get the heap bounds
+ * and another probe to set the code heap address of the newly created heap.
+ */
+
+/*
+ * ----- BEGIN: Get bounds of code heaps -----
+ */
 dtrace:helper:ustack:
-/!this->done &&
-this->CodeCache_low <= this->pc && this->pc < this->CodeCache_high/
+/init_done < 1 && this->number_of_heaps >= 1 && !this->done/
+{
+  MARK_LINE;
+  /* CodeHeap 1 */
+  init_done = 1;
+  this->code_heap1_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap1_low = copyin_ptr(this->code_heap1_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap1_high = copyin_ptr(this->code_heap1_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 2 && this->number_of_heaps >= 2 && !this->done/
+{
+  MARK_LINE;
+  /* CodeHeap 2 */
+  init_done = 2;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap2_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap2_low = copyin_ptr(this->code_heap2_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap2_high = copyin_ptr(this->code_heap2_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 3 && this->number_of_heaps >= 3 && !this->done/
+{
+  /* CodeHeap 3 */
+  init_done = 3;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap3_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap3_low = copyin_ptr(this->code_heap3_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap3_high = copyin_ptr(this->code_heap3_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 4 && this->number_of_heaps >= 4 && !this->done/
+{
+  /* CodeHeap 4 */
+  init_done = 4;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap4_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap4_low = copyin_ptr(this->code_heap4_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap4_high = copyin_ptr(this->code_heap4_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 5 && this->number_of_heaps >= 5 && !this->done/
+{
+  /* CodeHeap 5 */
+  init_done = 5;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap5_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap5_low = copyin_ptr(this->code_heap5_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap5_high = copyin_ptr(this->code_heap5_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+/*
+ * ----- END: Get bounds of code heaps -----
+ */
+
+/*
+ * ----- BEGIN: Get address of the code heap pc points to -----
+ */
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 1 && this->code_heap1_low <= this->pc && this->pc < this->code_heap1_high/
 {
   MARK_LINE;
   this->codecache = 1;
+  this->code_heap_address = this->code_heap1_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 2 && this->code_heap2_low <= this->pc && this->pc < this->code_heap2_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap2_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 3 && this->code_heap3_low <= this->pc && this->pc < this->code_heap3_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap3_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 4 && this->code_heap4_low <= this->pc && this->pc < this->code_heap4_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap4_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 5 && this->code_heap5_low <= this->pc && this->pc < this->code_heap5_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap5_address;
+}
+/*
+ * ----- END: Get address of the code heap pc points to -----
+ */
+
+dtrace:helper:ustack:
+/!this->done && this->codecache/
+{
+  MARK_LINE;
+  /* 
+   * Get code heap configuration
+   */
+  this->code_heap_low = copyin_ptr(this->code_heap_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap_segmap_low = copyin_ptr(this->code_heap_address +
+      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
+  this->code_heap_log2_segment_size = copyin_uint32(
+      this->code_heap_address + OFFSET_CodeHeap_log2_segment_size);
 
   /*
-   * Find start.
+   * Find start
    */
-  this->segment = (this->pc - this->CodeCache_low) >>
-    this->CodeHeap_log2_segment_size;
-  this->block = this->CodeCache_segmap_low;
+  this->segment = (this->pc - this->code_heap_low) >>
+    this->code_heap_log2_segment_size;
+  this->block = this->code_heap_segmap_low;
   this->tag = copyin_uchar(this->block + this->segment);
-  "second";
 }
 
 dtrace:helper:ustack:
@@ -255,8 +380,8 @@
 /!this->done && this->codecache/
 {
   MARK_LINE;
-  this->block = this->CodeCache_low +
-    (this->segment << this->CodeHeap_log2_segment_size);
+  this->block = this->code_heap_low +
+    (this->segment << this->code_heap_log2_segment_size);
   this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used);
 }
 
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c	Fri Sep 26 01:40:31 2014 -0700
@@ -150,16 +150,18 @@
   uint64_t Use_Compressed_Oops_address;
   uint64_t Universe_narrow_oop_base_address;
   uint64_t Universe_narrow_oop_shift_address;
-  uint64_t CodeCache_heap_address;
+  uint64_t CodeCache_heaps_address;
 
   /* Volatiles */
   uint8_t  Use_Compressed_Oops;
   uint64_t Universe_narrow_oop_base;
   uint32_t Universe_narrow_oop_shift;
-  uint64_t CodeCache_low;
-  uint64_t CodeCache_high;
-  uint64_t CodeCache_segmap_low;
-  uint64_t CodeCache_segmap_high;
+  // Code cache heaps
+  int32_t  Number_of_heaps;
+  uint64_t* Heap_low;
+  uint64_t* Heap_high;
+  uint64_t* Heap_segmap_low;
+  uint64_t* Heap_segmap_high;
 
   int32_t  SIZE_CodeCache_log2_segment;
 
@@ -278,8 +280,9 @@
     }
 
     if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) {
-      if (strcmp("_heap", vmp->fieldName) == 0) {
-        err = read_pointer(J, vmp->address, &J->CodeCache_heap_address);
+      /* Read _heaps field of type GrowableArray<CodeHeap*>*      */
+      if (strcmp("_heaps", vmp->fieldName) == 0) {
+        err = read_pointer(J, vmp->address, &J->CodeCache_heaps_address);
       }
     } else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) {
       if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
@@ -318,7 +321,9 @@
 }
 
 static int read_volatiles(jvm_agent_t* J) {
-  uint64_t ptr;
+  int i;
+  uint64_t array_data;
+  uint64_t code_heap_address;
   int err;
 
   err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
@@ -334,20 +339,43 @@
   err = ps_pread(J->P,  J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
   CHECK_FAIL(err);
 
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
-                     OFFSET_VirtualSpace_low, &J->CodeCache_low);
-  CHECK_FAIL(err);
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
-                     OFFSET_VirtualSpace_high, &J->CodeCache_high);
-  CHECK_FAIL(err);
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
-                     OFFSET_VirtualSpace_low, &J->CodeCache_segmap_low);
-  CHECK_FAIL(err);
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
-                     OFFSET_VirtualSpace_high, &J->CodeCache_segmap_high);
-  CHECK_FAIL(err);
+  /* CodeCache_heaps_address points to GrowableArray<CodeHeap*>, read _data field
+     pointing to the first entry of type CodeHeap* in the array */
+  err = read_pointer(J, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_data, &array_data);
+  /* Read _len field containing the number of code heaps */
+  err = ps_pread(J->P, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_len,
+                 &J->Number_of_heaps, sizeof(J->Number_of_heaps));
+
+  /* Allocate memory for heap configurations */
+  J->Heap_low = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+  J->Heap_high = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+  J->Heap_segmap_low = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+  J->Heap_segmap_high = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+
+  /* Read code heap configurations */
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    /* Read address of heap */
+    err = read_pointer(J, array_data, &code_heap_address);
+    CHECK_FAIL(err);
 
-  err = ps_pread(J->P, J->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size,
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
+                       OFFSET_VirtualSpace_low, &J->Heap_low[i]);
+    CHECK_FAIL(err);
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
+                       OFFSET_VirtualSpace_high, &J->Heap_high[i]);
+    CHECK_FAIL(err);
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
+                       OFFSET_VirtualSpace_low, &J->Heap_segmap_low[i]);
+    CHECK_FAIL(err);
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
+                       OFFSET_VirtualSpace_high, &J->Heap_segmap_high[i]);
+    CHECK_FAIL(err);
+
+    /* Increment pointer to next entry */
+    array_data = array_data + POINTER_SIZE;
+  }
+
+  err = ps_pread(J->P, code_heap_address + OFFSET_CodeHeap_log2_segment_size,
                  &J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment));
   CHECK_FAIL(err);
 
@@ -357,46 +385,57 @@
   return err;
 }
 
-
-static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
-  /* make sure the code cache is up to date */
-  return (J->CodeCache_low <= ptr && ptr < J->CodeCache_high);
+static int codeheap_contains(int heap_num, jvm_agent_t* J, uint64_t ptr) {
+  return (J->Heap_low[heap_num] <= ptr && ptr < J->Heap_high[heap_num]);
 }
 
-static uint64_t segment_for(jvm_agent_t* J, uint64_t p) {
-  return (p - J->CodeCache_low) >> J->SIZE_CodeCache_log2_segment;
+static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
+  int i;
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    if (codeheap_contains(i, J, ptr)) {
+      return 1;
+    }
+  }
+  return 0;
 }
 
-static uint64_t block_at(jvm_agent_t* J, int i) {
-  return J->CodeCache_low + (i << J->SIZE_CodeCache_log2_segment);
+static uint64_t segment_for(int heap_num, jvm_agent_t* J, uint64_t p) {
+  return (p - J->Heap_low[heap_num]) >> J->SIZE_CodeCache_log2_segment;
+}
+
+static uint64_t block_at(int heap_num, jvm_agent_t* J, int i) {
+  return J->Heap_low[heap_num] + (i << J->SIZE_CodeCache_log2_segment);
 }
 
 static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) {
   int err;
+  int i;
 
-  *startp = 0;
-  if (J->CodeCache_low <= ptr && ptr < J->CodeCache_high) {
-    int32_t used;
-    uint64_t segment = segment_for(J, ptr);
-    uint64_t block = J->CodeCache_segmap_low;
-    uint8_t tag;
-    err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
-    CHECK_FAIL(err);
-    if (tag == 0xff)
-      return PS_OK;
-    while (tag > 0) {
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    *startp = 0;
+    if (codeheap_contains(i, J, ptr)) {
+      int32_t used;
+      uint64_t segment = segment_for(i, J, ptr);
+      uint64_t block = J->Heap_segmap_low[i];
+      uint8_t tag;
       err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
       CHECK_FAIL(err);
-      segment -= tag;
+      if (tag == 0xff)
+        return PS_OK;
+      while (tag > 0) {
+        err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
+        CHECK_FAIL(err);
+        segment -= tag;
+      }
+      block = block_at(i, J, segment);
+      err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
+      CHECK_FAIL(err);
+      if (used) {
+        *startp = block + SIZE_HeapBlockHeader;
+      }
     }
-    block = block_at(J, segment);
-    err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
-    CHECK_FAIL(err);
-    if (used) {
-      *startp = block + SIZE_HeapBlockHeader;
-    }
+    return PS_OK;
   }
-  return PS_OK;
 
  fail:
   return -1;
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -3129,8 +3129,7 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t size, size_t alignment, char* addr,
-                                 bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
   fatal("os::reserve_memory_special should not be called on Solaris.");
   return NULL;
 }
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -265,7 +265,7 @@
                 CAST_FROM_FN_PTR(address, os::current_frame));
   if (os::is_first_C_frame(&myframe)) {
     // stack is not walkable
-    return frame(NULL, NULL, NULL);
+    return frame(NULL, NULL, false);
   } else {
     return os::get_sender_for_C_frame(&myframe);
   }
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -327,7 +327,7 @@
   if (t2->is_constant()) {
     switch (t2->tag()) {
       case intTag   : if (t2->as_IntConstant()->value() == 0)  set_canonical(x->x()); return;
-      case longTag  : if (t2->as_IntConstant()->value() == 0)  set_canonical(x->x()); return;
+      case longTag  : if (t2->as_LongConstant()->value() == (jlong)0)  set_canonical(x->x()); return;
       default       : ShouldNotReachHere();
     }
   }
@@ -808,28 +808,41 @@
 
 static bool match_index_and_scale(Instruction*  instr,
                                   Instruction** index,
-                                  int*          log2_scale,
-                                  Instruction** instr_to_unpin) {
-  *instr_to_unpin = NULL;
-
-  // Skip conversion ops
+                                  int*          log2_scale) {
+  // Skip conversion ops. This works only on 32-bit because of the implicit l2i that the
+  // unsafe access performs.
+#ifndef _LP64
   Convert* convert = instr->as_Convert();
-  if (convert != NULL) {
+  if (convert != NULL && convert->op() == Bytecodes::_i2l) {
+    assert(convert->value()->type() == intType, "invalid input type");
     instr = convert->value();
   }
+#endif
 
   ShiftOp* shift = instr->as_ShiftOp();
   if (shift != NULL) {
-    if (shift->is_pinned()) {
-      *instr_to_unpin = shift;
+    if (shift->op() == Bytecodes::_lshl) {
+      assert(shift->x()->type() == longType, "invalid input type");
+    } else {
+#ifndef _LP64
+      if (shift->op() == Bytecodes::_ishl) {
+        assert(shift->x()->type() == intType, "invalid input type");
+      } else {
+        return false;
+      }
+#else
+      return false;
+#endif
     }
+
+
     // Constant shift value?
     Constant* con = shift->y()->as_Constant();
     if (con == NULL) return false;
     // Well-known type and value?
     IntConstant* val = con->type()->as_IntConstant();
-    if (val == NULL) return false;
-    if (shift->x()->type() != intType) return false;
+    assert(val != NULL, "Should be an int constant");
+
     *index = shift->x();
     int tmp_scale = val->value();
     if (tmp_scale >= 0 && tmp_scale < 4) {
@@ -842,31 +855,42 @@
 
   ArithmeticOp* arith = instr->as_ArithmeticOp();
   if (arith != NULL) {
-    if (arith->is_pinned()) {
-      *instr_to_unpin = arith;
+    // See if either arg is a known constant
+    Constant* con = arith->x()->as_Constant();
+    if (con != NULL) {
+      *index = arith->y();
+    } else {
+      con = arith->y()->as_Constant();
+      if (con == NULL) return false;
+      *index = arith->x();
     }
+    long const_value;
     // Check for integer multiply
-    if (arith->op() == Bytecodes::_imul) {
-      // See if either arg is a known constant
-      Constant* con = arith->x()->as_Constant();
-      if (con != NULL) {
-        *index = arith->y();
+    if (arith->op() == Bytecodes::_lmul) {
+      assert((*index)->type() == longType, "invalid input type");
+      LongConstant* val = con->type()->as_LongConstant();
+      assert(val != NULL, "expecting a long constant");
+      const_value = val->value();
+    } else {
+#ifndef _LP64
+      if (arith->op() == Bytecodes::_imul) {
+        assert((*index)->type() == intType, "invalid input type");
+        IntConstant* val = con->type()->as_IntConstant();
+        assert(val != NULL, "expecting an int constant");
+        const_value = val->value();
       } else {
-        con = arith->y()->as_Constant();
-        if (con == NULL) return false;
-        *index = arith->x();
+        return false;
       }
-      if ((*index)->type() != intType) return false;
-      // Well-known type and value?
-      IntConstant* val = con->type()->as_IntConstant();
-      if (val == NULL) return false;
-      switch (val->value()) {
-      case 1: *log2_scale = 0; return true;
-      case 2: *log2_scale = 1; return true;
-      case 4: *log2_scale = 2; return true;
-      case 8: *log2_scale = 3; return true;
-      default:            return false;
-      }
+#else
+      return false;
+#endif
+    }
+    switch (const_value) {
+    case 1: *log2_scale = 0; return true;
+    case 2: *log2_scale = 1; return true;
+    case 4: *log2_scale = 2; return true;
+    case 8: *log2_scale = 3; return true;
+    default:            return false;
     }
   }
 
@@ -879,29 +903,37 @@
                   Instruction** base,
                   Instruction** index,
                   int*          log2_scale) {
-  Instruction* instr_to_unpin = NULL;
   ArithmeticOp* root = x->base()->as_ArithmeticOp();
   if (root == NULL) return false;
   // Limit ourselves to addition for now
   if (root->op() != Bytecodes::_ladd) return false;
+
+  bool match_found = false;
   // Try to find shift or scale op
-  if (match_index_and_scale(root->y(), index, log2_scale, &instr_to_unpin)) {
+  if (match_index_and_scale(root->y(), index, log2_scale)) {
     *base = root->x();
-  } else if (match_index_and_scale(root->x(), index, log2_scale, &instr_to_unpin)) {
+    match_found = true;
+  } else if (match_index_and_scale(root->x(), index, log2_scale)) {
     *base = root->y();
-  } else if (root->y()->as_Convert() != NULL) {
+    match_found = true;
+  } else if (NOT_LP64(root->y()->as_Convert() != NULL) LP64_ONLY(false)) {
+    // Skipping i2l works only on 32-bit because of the implicit l2i that the unsafe access performs.
+    // 64bit needs a real sign-extending conversion.
     Convert* convert = root->y()->as_Convert();
-    if (convert->op() == Bytecodes::_i2l && convert->value()->type() == intType) {
+    if (convert->op() == Bytecodes::_i2l) {
+      assert(convert->value()->type() == intType, "should be an int");
       // pick base and index, setting scale at 1
       *base  = root->x();
       *index = convert->value();
       *log2_scale = 0;
-    } else {
-      return false;
+      match_found = true;
     }
-  } else {
-    // doesn't match any expected sequences
-    return false;
+  }
+  // The default solution
+  if (!match_found) {
+    *base = root->x();
+    *index = root->y();
+    *log2_scale = 0;
   }
 
    // If the value is pinned then it will always be computed so
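
For reference, the scale matching performed by the rewritten match_index_and_scale() boils down to turning a constant shift amount, or a constant multiplier of 1, 2, 4 or 8, into a log2 scale. Below is a hedged standalone sketch, not HotSpot code, with invented names.

    #include <cstdio>

    // Maps a constant multiplier from base + index * multiplier to the log2
    // scale an addressing mode can encode; mirrors the switch in the hunk above.
    static bool multiplier_to_log2_scale(long multiplier, int* log2_scale) {
      switch (multiplier) {
        case 1: *log2_scale = 0; return true;
        case 2: *log2_scale = 1; return true;
        case 4: *log2_scale = 2; return true;
        case 8: *log2_scale = 3; return true;
        default: return false;
      }
    }

    int main() {
      int scale;
      if (multiplier_to_log2_scale(8, &scale)) {
        printf("base + index*8 can be encoded as base + (index << %d)\n", scale);
      }
      return 0;
    }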
--- a/hotspot/src/share/vm/c1/c1_Compiler.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compiler.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -76,6 +76,11 @@
   }
 }
 
+int Compiler::code_buffer_size() {
+  assert(SegmentedCodeCache, "Should be only used with a segmented code cache");
+  return Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size();
+}
+
 BufferBlob* Compiler::init_buffer_blob() {
   // Allocate buffer blob once at startup since allocation for each
   // compilation seems to be too expensive (at least on Intel win32).
--- a/hotspot/src/share/vm/c1/c1_Compiler.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compiler.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -54,6 +54,9 @@
 
   // Print compilation timers and statistics
   virtual void print_timers();
+
+  // Size of the code buffer
+  static int code_buffer_size();
 };
 
 #endif // SHARE_VM_C1_C1_COMPILER_HPP
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -2045,6 +2045,8 @@
   }
 }
 
+// Here UnsafeGetRaw may have x->base() and x->index() be either int or long
+// on both 32-bit and 64-bit platforms. On 64-bit, x->base() is expected to always be long.
 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
   LIRItem base(x->base(), this);
   LIRItem idx(this);
@@ -2059,50 +2061,73 @@
 
   int   log2_scale = 0;
   if (x->has_index()) {
-    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
     log2_scale = x->log2_scale();
   }
 
   assert(!x->has_index() || idx.value() == x->index(), "should match");
 
   LIR_Opr base_op = base.result();
+  LIR_Opr index_op = idx.result();
 #ifndef _LP64
   if (x->base()->type()->tag() == longTag) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
-  } else {
-    assert(x->base()->type()->tag() == intTag, "must be");
+  }
+  if (x->has_index()) {
+    if (x->index()->type()->tag() == longTag) {
+      LIR_Opr long_index_op = index_op;
+      if (x->index()->type()->is_constant()) {
+        long_index_op = new_register(T_LONG);
+        __ move(index_op, long_index_op);
+      }
+      index_op = new_register(T_INT);
+      __ convert(Bytecodes::_l2i, long_index_op, index_op);
+    } else {
+      assert(x->index()->type()->tag() == intTag, "must be");
+    }
   }
+  // At this point base and index should be all ints.
+  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
+  assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
+#else
+  if (x->has_index()) {
+    if (x->index()->type()->tag() == intTag) {
+      if (!x->index()->type()->is_constant()) {
+        index_op = new_register(T_LONG);
+        __ convert(Bytecodes::_i2l, idx.result(), index_op);
+      }
+    } else {
+      assert(x->index()->type()->tag() == longTag, "must be");
+      if (x->index()->type()->is_constant()) {
+        index_op = new_register(T_LONG);
+        __ move(idx.result(), index_op);
+      }
+    }
+  }
+  // At this point base is a long non-constant
+  // Index is a long register or a int constant.
+  // We allow the constant to stay an int because that would allow us a more compact encoding by
+  // embedding an immediate offset in the address expression. If we have a long constant, we have to
+  // move it into a register first.
+  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
+  assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
+                            (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
 #endif
 
   BasicType dst_type = x->basic_type();
-  LIR_Opr index_op = idx.result();
 
   LIR_Address* addr;
   if (index_op->is_constant()) {
     assert(log2_scale == 0, "must not have a scale");
+    assert(index_op->type() == T_INT, "only int constants supported");
     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
   } else {
 #ifdef X86
-#ifdef _LP64
-    if (!index_op->is_illegal() && index_op->type() == T_INT) {
-      LIR_Opr tmp = new_pointer_register();
-      __ convert(Bytecodes::_i2l, index_op, tmp);
-      index_op = tmp;
-    }
-#endif
     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
 #elif defined(ARM)
     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
 #else
     if (index_op->is_illegal() || log2_scale == 0) {
-#ifdef _LP64
-      if (!index_op->is_illegal() && index_op->type() == T_INT) {
-        LIR_Opr tmp = new_pointer_register();
-        __ convert(Bytecodes::_i2l, index_op, tmp);
-        index_op = tmp;
-      }
-#endif
       addr = new LIR_Address(base_op, index_op, dst_type);
     } else {
       LIR_Opr tmp = new_pointer_register();
@@ -2129,7 +2154,6 @@
   BasicType type = x->basic_type();
 
   if (x->has_index()) {
-    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
     log2_scale = x->log2_scale();
   }
 
@@ -2152,38 +2176,39 @@
   set_no_result(x);
 
   LIR_Opr base_op = base.result();
+  LIR_Opr index_op = idx.result();
+
 #ifndef _LP64
   if (x->base()->type()->tag() == longTag) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
-  } else {
-    assert(x->base()->type()->tag() == intTag, "must be");
+  }
+  if (x->has_index()) {
+    if (x->index()->type()->tag() == longTag) {
+      index_op = new_register(T_INT);
+      __ convert(Bytecodes::_l2i, idx.result(), index_op);
+    }
   }
+  // At this point base and index should be all ints and not constants
+  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
+  assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
+#else
+  if (x->has_index()) {
+    if (x->index()->type()->tag() == intTag) {
+      index_op = new_register(T_LONG);
+      __ convert(Bytecodes::_i2l, idx.result(), index_op);
+    }
+  }
+  // At this point base and index are long and non-constant
+  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
+  assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
 #endif
 
-  LIR_Opr index_op = idx.result();
   if (log2_scale != 0) {
     // temporary fix (platform dependent code without shift on Intel would be better)
-    index_op = new_pointer_register();
-#ifdef _LP64
-    if(idx.result()->type() == T_INT) {
-      __ convert(Bytecodes::_i2l, idx.result(), index_op);
-    } else {
-#endif
-      // TODO: ARM also allows embedded shift in the address
-      __ move(idx.result(), index_op);
-#ifdef _LP64
-    }
-#endif
+    // TODO: ARM also allows embedded shift in the address
     __ shift_left(index_op, log2_scale, index_op);
   }
-#ifdef _LP64
-  else if(!index_op->is_illegal() && index_op->type() == T_INT) {
-    LIR_Opr tmp = new_pointer_register();
-    __ convert(Bytecodes::_i2l, index_op, tmp);
-    index_op = tmp;
-  }
-#endif
 
   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
   __ move(value.result(), addr);
--- a/hotspot/src/share/vm/c1/c1_globals.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_globals.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -287,9 +287,6 @@
   develop(bool, InstallMethods, true,                                       \
           "Install methods at the end of successful compilations")          \
                                                                             \
-  product(intx, CompilationRepeat, 0,                                       \
-          "Number of times to recompile method before returning result")    \
-                                                                            \
   develop(intx, NMethodSizeLimit, (64*K)*wordSize,                          \
           "Maximum size of a compiled method.")                             \
                                                                             \
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -34,6 +34,7 @@
 #include "ci/ciUtilities.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "code/codeCache.hpp"
 #include "code/scopeDesc.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
@@ -1085,7 +1086,7 @@
   } else {
     // The CodeCache is full. Print out warning and disable compilation.
     record_failure("code cache is full");
-    CompileBroker::handle_full_code_cache();
+    CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
   }
 }
 
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -1694,8 +1694,6 @@
   constantPoolHandle cp (THREAD, _method->constants());
 
   for(int i = 0; i < exlength; i++) {
-    //reacquire the table in case a GC happened
-    ExceptionTable exhandlers(_method());
     u2 start_pc = exhandlers.start_pc(i);
     u2 end_pc = exhandlers.end_pc(i);
     u2 handler_pc = exhandlers.handler_pc(i);
@@ -1803,8 +1801,6 @@
   ExceptionTable exhandlers(_method());
   int exlength = exhandlers.length();
   for(int i = 0; i < exlength; i++) {
-    //reacquire the table in case a GC happened
-    ExceptionTable exhandlers(_method());
     u2 start_pc = exhandlers.start_pc(i);
     u2 end_pc = exhandlers.end_pc(i);
     u2 handler_pc = exhandlers.handler_pc(i);
--- a/hotspot/src/share/vm/code/codeBlob.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -229,14 +229,11 @@
   return blob;
 }
 
-
 void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
-  void* p = CodeCache::allocate(size, is_critical);
-  return p;
+  return CodeCache::allocate(size, CodeBlobType::NonMethod, is_critical);
 }
 
-
-void BufferBlob::free( BufferBlob *blob ) {
+void BufferBlob::free(BufferBlob *blob) {
   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
   blob->flush();
   {
@@ -299,7 +296,6 @@
   return blob;
 }
 
-
 //----------------------------------------------------------------------------------------------------
 // Implementation of RuntimeStub
 
@@ -340,14 +336,14 @@
 
 
 void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
-  void* p = CodeCache::allocate(size, true);
+  void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }
 
 // operator new shared by all singletons:
 void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
-  void* p = CodeCache::allocate(size, true);
+  void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }
--- a/hotspot/src/share/vm/code/codeBlob.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -30,6 +30,18 @@
 #include "runtime/frame.hpp"
 #include "runtime/handles.hpp"
 
+// CodeBlob Types
+// Used in the CodeCache to assign CodeBlobs to different CodeHeaps
+struct CodeBlobType {
+  enum {
+    MethodNonProfiled   = 0,    // Execution level 1 and 4 (non-profiled) nmethods (including native nmethods)
+    MethodProfiled      = 1,    // Execution level 2 and 3 (profiled) nmethods
+    NonMethod           = 2,    // Non-methods like Buffers, Adapters and Runtime Stubs
+    All                 = 3,    // All types (No code cache segmentation)
+    NumTypes            = 4     // Number of CodeBlobTypes
+  };
+};
+
 // CodeBlob - superclass for all entries in the CodeCache.
 //
 // Subtypes are:
@@ -385,9 +397,6 @@
     return (pc == unpack_pc || (pc + frame::pc_return_offset) == unpack_pc);
   }
 
-
-
-
   // GC for args
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
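
The enum comments above fix which tiers go where: levels 2 and 3 are profiled, levels 1 and 4 (plus native wrappers) are not. Below is a hedged sketch of that mapping, assuming the CodeBlobType declaration from this hunk is in scope; the function name is invented, and the real lookup is CodeCache::get_code_blob_type(), which is not shown in this section.

    // Illustrative only; not the patch's implementation.
    static int blob_type_for_comp_level(int comp_level) {
      if (comp_level == 2 || comp_level == 3) {
        return CodeBlobType::MethodProfiled;      // tier 2/3 (profiled) nmethods
      }
      return CodeBlobType::MethodNonProfiled;     // tier 1/4 and native nmethods
    }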
 
--- a/hotspot/src/share/vm/code/codeCache.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -44,12 +44,21 @@
 #include "runtime/icache.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/compilationPolicy.hpp"
 #include "services/memoryService.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/xmlstream.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Compilation.hpp"
+#include "c1/c1_Compiler.hpp"
+#endif
+#ifdef COMPILER2
+#include "opto/c2compiler.hpp"
+#include "opto/compile.hpp"
+#include "opto/node.hpp"
+#endif
 
 // Helper class for printing in CodeCache
-
 class CodeBlob_sizes {
  private:
   int count;
@@ -115,64 +124,216 @@
   }
 };
 
-// CodeCache implementation
+// Iterate over all CodeHeaps
+#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
+// Iterate over all CodeBlobs (cb) on the given CodeHeap
+#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
 
-CodeHeap * CodeCache::_heap = new CodeHeap();
+address CodeCache::_low_bound = 0;
+address CodeCache::_high_bound = 0;
 int CodeCache::_number_of_blobs = 0;
 int CodeCache::_number_of_adapters = 0;
 int CodeCache::_number_of_nmethods = 0;
 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 bool CodeCache::_needs_cache_clean = false;
 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
-
 int CodeCache::_codemem_full_count = 0;
 
-CodeBlob* CodeCache::first() {
-  assert_locked_or_safepoint(CodeCache_lock);
-  return (CodeBlob*)_heap->first();
-}
+// Initialize array of CodeHeaps
+GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
+
+void CodeCache::initialize_heaps() {
+  // Determine size of compiler buffers
+  size_t code_buffers_size = 0;
+#ifdef COMPILER1
+  // C1 temporary code buffers (see Compiler::init_buffer_blob())
+  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
+  code_buffers_size += c1_count * Compiler::code_buffer_size();
+#endif
+#ifdef COMPILER2
+  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
+  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
+  // Initial size of constant table (this may be increased if a compiled method needs more space)
+  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
+#endif
 
+  // Calculate default CodeHeap sizes if not set by user
+  if (!FLAG_IS_CMDLINE(NonMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
+      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
+    // Increase default NonMethodCodeHeapSize to account for compiler buffers
+    FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, NonMethodCodeHeapSize + code_buffers_size);
+
+    // Check if we have enough space for the non-method code heap
+    if (ReservedCodeCacheSize > NonMethodCodeHeapSize) {
+      // Use the default value for NonMethodCodeHeapSize and one half of the
+      // remaining size for non-profiled methods and one half for profiled methods
+      size_t remaining_size = ReservedCodeCacheSize - NonMethodCodeHeapSize;
+      size_t profiled_size = remaining_size / 2;
+      size_t non_profiled_size = remaining_size - profiled_size;
+      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
+      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
+    } else {
+      // Use all space for the non-method heap and set other heaps to minimal size
+      FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
+      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
+      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
+    }
+  }
 
-CodeBlob* CodeCache::next(CodeBlob* cb) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  return (CodeBlob*)_heap->next(cb);
-}
+  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
+  if(!heap_available(CodeBlobType::MethodProfiled)) {
+    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
+    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
+  }
+  // We do not need the non-profiled CodeHeap, use all space for the non-method CodeHeap
+  if(!heap_available(CodeBlobType::MethodNonProfiled)) {
+    FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, NonMethodCodeHeapSize + NonProfiledCodeHeapSize);
+    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
+  }
+
+  // Make sure we have enough space for VM internal code
+  uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
+  if (NonMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
+    vm_exit_during_initialization("Not enough space in non-method code heap to run VM.");
+  }
+  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
+
+  // Align reserved sizes of CodeHeaps
+  size_t non_method_size    = ReservedCodeSpace::allocation_align_size_up(NonMethodCodeHeapSize);
+  size_t profiled_size      = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
+  size_t non_profiled_size  = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
 
+  // Compute initial sizes of CodeHeaps
+  size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
+  size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
+  size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
 
-CodeBlob* CodeCache::alive(CodeBlob *cb) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  while (cb != NULL && !cb->is_alive()) cb = next(cb);
-  return cb;
+  // Reserve one contiguous chunk of memory for CodeHeaps and split it into
+  // parts for the individual heaps. The memory layout looks like this:
+  // ---------- high -----------
+  //    Non-profiled nmethods
+  //      Profiled nmethods
+  //         Non-methods
+  // ---------- low ------------
+  ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
+  ReservedSpace non_method_space    = rs.first_part(non_method_size);
+  ReservedSpace rest                = rs.last_part(non_method_size);
+  ReservedSpace profiled_space      = rest.first_part(profiled_size);
+  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);
+
+  // Non-methods (stubs, adapters, ...)
+  add_heap(non_method_space, "non-methods", init_non_method_size, CodeBlobType::NonMethod);
+  // Tier 2 and tier 3 (profiled) methods
+  add_heap(profiled_space, "profiled nmethods", init_profiled_size, CodeBlobType::MethodProfiled);
+  // Tier 1 and tier 4 (non-profiled) methods and native methods
+  add_heap(non_profiled_space, "non-profiled nmethods", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
 }
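To make the default sizing above concrete, here is a minimal standalone sketch of the split. The 240M reserved size and the 8M non-method size (after adding the compiler buffer space) are illustrative assumptions, not the VM's actual defaults:

// Illustrative only: mirrors the one-half-profiled / one-half-non-profiled split above.
#include <cstdio>
#include <cstddef>
int main() {
  const size_t M = 1024 * 1024;
  size_t reserved     = 240 * M;              // assumed ReservedCodeCacheSize
  size_t non_method   = 8 * M;                // assumed NonMethodCodeHeapSize incl. compiler buffers
  size_t remaining    = reserved - non_method;
  size_t profiled     = remaining / 2;        // becomes ProfiledCodeHeapSize
  size_t non_profiled = remaining - profiled; // becomes NonProfiledCodeHeapSize
  printf("non-method=%zuM profiled=%zuM non-profiled=%zuM\n",
         non_method / M, profiled / M, non_profiled / M);
  return 0;
}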
 
+ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
+  // Determine alignment
+  const size_t page_size = os::can_execute_large_page_memory() ?
+          MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
+               os::page_size_for_region(size, 8)) :
+          os::vm_page_size();
+  const size_t granularity = os::vm_allocation_granularity();
+  const size_t r_align = MAX2(page_size, granularity);
+  const size_t r_size = align_size_up(size, r_align);
+  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
+    MAX2(page_size, granularity);
 
-nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
-  return (nmethod*)cb;
+  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
+
+  // Initialize bounds
+  _low_bound = (address)rs.base();
+  _high_bound = _low_bound + rs.size();
+
+  return rs;
+}
+
+bool CodeCache::heap_available(int code_blob_type) {
+  if (!SegmentedCodeCache) {
+    // No segmentation: use a single code heap
+    return (code_blob_type == CodeBlobType::All);
+  } else if ((Arguments::mode() == Arguments::_int) ||
+             (TieredStopAtLevel == CompLevel_none)) {
+    // Interpreter only: we don't need any method code heaps
+    return (code_blob_type == CodeBlobType::NonMethod);
+  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
+    // Tiered compilation: use all code heaps
+    return (code_blob_type < CodeBlobType::All);
+  } else {
+    // No TieredCompilation: we only need the non-method and the non-profiled code heap
+    return (code_blob_type == CodeBlobType::NonMethod) ||
+           (code_blob_type == CodeBlobType::MethodNonProfiled);
+  }
 }
 
-nmethod* CodeCache::first_nmethod() {
-  assert_locked_or_safepoint(CodeCache_lock);
-  CodeBlob* cb = first();
-  while (cb != NULL && !cb->is_nmethod()) {
-    cb = next(cb);
+void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
+  // Check if heap is needed
+  if (!heap_available(code_blob_type)) {
+    return;
   }
-  return (nmethod*)cb;
+
+  // Create CodeHeap
+  CodeHeap* heap = new CodeHeap(name, code_blob_type);
+  _heaps->append(heap);
+
+  // Reserve Space
+  size_initial = round_to(size_initial, os::vm_page_size());
+
+  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
+    vm_exit_during_initialization("Could not reserve enough space for code cache");
+  }
+
+  // Register the CodeHeap
+  MemoryService::add_code_heap_memory_pool(heap, name);
+}
+
+CodeHeap* CodeCache::get_code_heap(CodeBlob* cb) {
+  assert(cb != NULL, "CodeBlob is null");
+  FOR_ALL_HEAPS(heap) {
+    if ((*heap)->contains(cb)) {
+      return *heap;
+    }
+  }
+  ShouldNotReachHere();
+  return NULL;
 }
 
-nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
+CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
+  FOR_ALL_HEAPS(heap) {
+    if ((*heap)->accepts(code_blob_type)) {
+      return *heap;
+    }
+  }
+  return NULL;
+}
+
+CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
   assert_locked_or_safepoint(CodeCache_lock);
-  cb = next(cb);
-  while (cb != NULL && !cb->is_nmethod()) {
-    cb = next(cb);
-  }
-  return (nmethod*)cb;
+  assert(heap != NULL, "heap is null");
+  return (CodeBlob*)heap->first();
 }
 
-static size_t maxCodeCacheUsed = 0;
+CodeBlob* CodeCache::first_blob(int code_blob_type) {
+  if (heap_available(code_blob_type)) {
+    return first_blob(get_code_heap(code_blob_type));
+  } else {
+    return NULL;
+  }
+}
 
-CodeBlob* CodeCache::allocate(int size, bool is_critical) {
+CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  assert(heap != NULL, "heap is null");
+  return (CodeBlob*)heap->next(cb);
+}
+
+CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
+  return next_blob(get_code_heap(cb), cb);
+}
+
+CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
   // Do not seize the CodeCache lock here--if the caller has not
   // already done so, we are going to lose bigtime, since the code
   // cache will contain a garbage CodeBlob until the caller can
@@ -184,22 +345,34 @@
     return NULL;
   }
   CodeBlob* cb = NULL;
+
+  // Get CodeHeap for the given CodeBlobType
+  CodeHeap* heap = get_code_heap(SegmentedCodeCache ? code_blob_type : CodeBlobType::All);
+  assert(heap != NULL, "heap is null");
+
   while (true) {
-    cb = (CodeBlob*)_heap->allocate(size, is_critical);
+    cb = (CodeBlob*)heap->allocate(size, is_critical);
     if (cb != NULL) break;
-    if (!_heap->expand_by(CodeCacheExpansionSize)) {
+    if (!heap->expand_by(CodeCacheExpansionSize)) {
       // Expansion failed
+      if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonMethod)) {
+        // Fallback solution: Store non-method code in the non-profiled code heap
+        return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
+      }
       return NULL;
     }
     if (PrintCodeCacheExtension) {
       ResourceMark rm;
-      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
-                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
-                    (address)_heap->high() - (address)_heap->low_boundary());
+      if (SegmentedCodeCache) {
+        tty->print("Code heap '%s'", heap->name());
+      } else {
+        tty->print("Code cache");
+      }
+      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
+                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
+                    (address)heap->high() - (address)heap->low_boundary());
     }
   }
-  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
-                          (address)_heap->low_boundary()) - unallocated_capacity());
   print_trace("allocation", cb, size);
   _number_of_blobs++;
   return cb;
@@ -220,12 +393,12 @@
   }
   _number_of_blobs--;
 
-  _heap->deallocate(cb);
+  // Get heap for given CodeBlob and deallocate
+  get_code_heap(cb)->deallocate(cb);
 
   assert(_number_of_blobs >= 0, "sanity check");
 }
 
-
 void CodeCache::commit(CodeBlob* cb) {
   // this is called by nmethod::nmethod, which must already own CodeCache_lock
   assert_locked_or_safepoint(CodeCache_lock);
@@ -243,89 +416,102 @@
   ICache::invalidate_range(cb->content_begin(), cb->content_size());
 }
 
-
-// Iteration over CodeBlobs
-
-#define FOR_ALL_BLOBS(var)       for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
-#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
-#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
-#define FOR_ALL_NMETHODS(var) for (nmethod *var = first_nmethod(); var != NULL; var = next_nmethod(var))
-
-
 bool CodeCache::contains(void *p) {
   // It should be ok to call contains without holding a lock
-  return _heap->contains(p);
+  FOR_ALL_HEAPS(heap) {
+    if ((*heap)->contains(p)) {
+      return true;
+    }
+  }
+  return false;
 }
 
-
-// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
-// looked up (i.e., one that has been marked for deletion). It only dependes on the _segmap to contain
+// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
+// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
 CodeBlob* CodeCache::find_blob(void* start) {
   CodeBlob* result = find_blob_unsafe(start);
-  if (result == NULL) return NULL;
   // We could potentially look up non_entrant methods
-  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
+  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
   return result;
 }
 
+// Lookup that does not fail if you look up a zombie method (if you call this, be sure to know
+// what you are doing)
+CodeBlob* CodeCache::find_blob_unsafe(void* start) {
+  // NMT can walk the stack before code cache is created
+  if (_heaps == NULL || _heaps->is_empty()) return NULL;
+
+  FOR_ALL_HEAPS(heap) {
+    CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
+    if (result != NULL && result->blob_contains((address)start)) {
+      return result;
+    }
+  }
+  return NULL;
+}
+
 nmethod* CodeCache::find_nmethod(void* start) {
-  CodeBlob *cb = find_blob(start);
-  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
+  CodeBlob* cb = find_blob(start);
+  assert(cb->is_nmethod(), "did not find an nmethod");
   return (nmethod*)cb;
 }
 
-
 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_BLOBS(p) {
-    f(p);
+  FOR_ALL_HEAPS(heap) {
+    FOR_ALL_BLOBS(cb, *heap) {
+      f(cb);
+    }
   }
 }
 
-
 void CodeCache::nmethods_do(void f(nmethod* nm)) {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_BLOBS(nm) {
-    if (nm->is_nmethod()) f((nmethod*)nm);
+  NMethodIterator iter;
+  while(iter.next()) {
+    f(iter.method());
   }
 }
 
 void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_NMETHODS(nm) {
-    f(nm);
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    f(iter.method());
   }
 }
 
 int CodeCache::alignment_unit() {
-  return (int)_heap->alignment_unit();
+  return (int)_heaps->first()->alignment_unit();
 }
 
-
 int CodeCache::alignment_offset() {
-  return (int)_heap->alignment_offset();
+  return (int)_heaps->first()->alignment_offset();
 }
 
-
-// Mark nmethods for unloading if they contain otherwise unreachable
-// oops.
+// Mark nmethods for unloading if they contain otherwise unreachable oops.
 void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_NMETHODS(nm) {
-    nm->do_unloading(is_alive, unloading_occurred);
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    iter.method()->do_unloading(is_alive, unloading_occurred);
   }
 }
 
 void CodeCache::blobs_do(CodeBlobClosure* f) {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    f->do_code_blob(cb);
+  FOR_ALL_HEAPS(heap) {
+    FOR_ALL_BLOBS(cb, *heap) {
+      if (cb->is_alive()) {
+        f->do_code_blob(cb);
 
 #ifdef ASSERT
-    if (cb->is_nmethod())
-      ((nmethod*)cb)->verify_scavenge_root_oops();
+        if (cb->is_nmethod())
+          ((nmethod*)cb)->verify_scavenge_root_oops();
 #endif //ASSERT
+      }
+    }
   }
 }
 
@@ -453,44 +639,39 @@
 
 // Temporarily mark nmethods that are claimed to be on the non-perm list.
 void CodeCache::mark_scavenge_root_nmethods() {
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod *nm = (nmethod*)cb;
-      assert(nm->scavenge_root_not_marked(), "clean state");
-      if (nm->on_scavenge_root_list())
-        nm->set_scavenge_root_marked();
-    }
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    nmethod* nm = iter.method();
+    assert(nm->scavenge_root_not_marked(), "clean state");
+    if (nm->on_scavenge_root_list())
+      nm->set_scavenge_root_marked();
   }
 }
 
 // If the closure is given, run it on the unlisted nmethods.
 // Also make sure that the effects of mark_scavenge_root_nmethods is gone.
 void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
-  FOR_ALL_ALIVE_BLOBS(cb) {
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    nmethod* nm = iter.method();
     bool call_f = (f_or_null != NULL);
-    if (cb->is_nmethod()) {
-      nmethod *nm = (nmethod*)cb;
-      assert(nm->scavenge_root_not_marked(), "must be already processed");
-      if (nm->on_scavenge_root_list())
-        call_f = false;  // don't show this one to the client
-      nm->verify_scavenge_root_oops();
-    } else {
-      call_f = false;   // not an nmethod
-    }
-    if (call_f)  f_or_null->do_code_blob(cb);
+    assert(nm->scavenge_root_not_marked(), "must be already processed");
+    if (nm->on_scavenge_root_list())
+      call_f = false;  // don't show this one to the client
+    nm->verify_scavenge_root_oops();
+    if (call_f)  f_or_null->do_code_blob(nm);
   }
 }
 #endif //PRODUCT
 
 void CodeCache::verify_clean_inline_caches() {
 #ifdef ASSERT
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod* nm = (nmethod*)cb;
-      assert(!nm->is_unloaded(), "Tautology");
-      nm->verify_clean_inline_caches();
-      nm->verify();
-    }
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    nmethod* nm = iter.method();
+    assert(!nm->is_unloaded(), "Tautology");
+    nm->verify_clean_inline_caches();
+    nm->verify();
   }
 #endif
 }
@@ -499,10 +680,12 @@
 #ifdef ASSERT
   // make sure that we aren't leaking icholders
   int count = 0;
-  FOR_ALL_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod* nm = (nmethod*)cb;
-      count += nm->verify_icholder_relocations();
+  FOR_ALL_HEAPS(heap) {
+    FOR_ALL_BLOBS(cb, *heap) {
+      if (cb->is_nmethod()) {
+        nmethod* nm = (nmethod*)cb;
+        count += nm->verify_icholder_relocations();
+      }
     }
   }
 
@@ -516,16 +699,15 @@
 
 void CodeCache::gc_epilogue() {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod *nm = (nmethod*)cb;
-      assert(!nm->is_unloaded(), "Tautology");
-      if (needs_cache_clean()) {
-        nm->cleanup_inline_caches();
-      }
-      DEBUG_ONLY(nm->verify());
-      DEBUG_ONLY(nm->verify_oop_relocations());
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    nmethod* nm = iter.method();
+    assert(!nm->is_unloaded(), "Tautology");
+    if (needs_cache_clean()) {
+      nm->cleanup_inline_caches();
     }
+    DEBUG_ONLY(nm->verify());
+    DEBUG_ONLY(nm->verify_oop_relocations());
   }
   set_needs_cache_clean(false);
   prune_scavenge_root_nmethods();
@@ -536,37 +718,89 @@
 void CodeCache::verify_oops() {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   VerifyOopClosure voc;
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod *nm = (nmethod*)cb;
-      nm->oops_do(&voc);
-      nm->verify_oop_relocations();
-    }
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    nmethod* nm = iter.method();
+    nm->oops_do(&voc);
+    nm->verify_oop_relocations();
   }
 }
 
-
-address CodeCache::first_address() {
-  assert_locked_or_safepoint(CodeCache_lock);
-  return (address)_heap->low_boundary();
+size_t CodeCache::capacity() {
+  size_t cap = 0;
+  FOR_ALL_HEAPS(heap) {
+    cap += (*heap)->capacity();
+  }
+  return cap;
 }
 
+size_t CodeCache::unallocated_capacity() {
+  size_t unallocated_cap = 0;
+  FOR_ALL_HEAPS(heap) {
+    unallocated_cap += (*heap)->unallocated_capacity();
+  }
+  return unallocated_cap;
+}
 
-address CodeCache::last_address() {
-  assert_locked_or_safepoint(CodeCache_lock);
-  return (address)_heap->high();
+size_t CodeCache::max_capacity() {
+  size_t max_cap = 0;
+  FOR_ALL_HEAPS(heap) {
+    max_cap += (*heap)->max_capacity();
+  }
+  return max_cap;
 }
 
 /**
- * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
+ * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
+ */
+bool CodeCache::is_full(int* code_blob_type) {
+  FOR_ALL_HEAPS(heap) {
+    if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
+      *code_blob_type = (*heap)->code_blob_type();
+      return true;
+    }
+  }
+  return false;
+}
+
+/**
+ * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
  * is free, reverse_free_ratio() returns 4.
  */
-double CodeCache::reverse_free_ratio() {
-  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
-  double max_capacity = (double)CodeCache::max_capacity();
+double CodeCache::reverse_free_ratio(int code_blob_type) {
+  CodeHeap* heap = get_code_heap(code_blob_type);
+  if (heap == NULL) {
+    return 0;
+  }
+  double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
+  double max_capacity = (double)heap->max_capacity();
   return max_capacity / unallocated_capacity;
 }
 
+size_t CodeCache::bytes_allocated_in_freelists() {
+  size_t allocated_bytes = 0;
+  FOR_ALL_HEAPS(heap) {
+    allocated_bytes += (*heap)->allocated_in_freelist();
+  }
+  return allocated_bytes;
+}
+
+int CodeCache::allocated_segments() {
+  int number_of_segments = 0;
+  FOR_ALL_HEAPS(heap) {
+    number_of_segments += (*heap)->allocated_segments();
+  }
+  return number_of_segments;
+}
+
+size_t CodeCache::freelists_length() {
+  size_t length = 0;
+  FOR_ALL_HEAPS(heap) {
+    length += (*heap)->freelist_length();
+  }
+  return length;
+}
+
 void icache_init();
 
 void CodeCache::initialize() {
@@ -579,14 +813,16 @@
   // the code cache to the page size.  In particular, Solaris is moving to a larger
   // default page size.
   CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
-  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
-  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
-  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
-    vm_exit_during_initialization("Could not reserve enough space for code cache");
+
+  if (SegmentedCodeCache) {
+    // Use multiple code heaps
+    initialize_heaps();
+  } else {
+    // Use a single code heap
+    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
+    add_heap(rs, "Code heap", InitialCodeCacheSize, CodeBlobType::All);
   }
 
-  MemoryService::add_code_heap_memory_pool(_heap);
-
   // Initialize ICache flush mechanism
   // This service is needed for os::register_code_area
   icache_init();
@@ -594,10 +830,9 @@
   // Give OS a chance to register generated code area.
   // This is used on Windows 64 bit platforms to register
   // Structured Exception Handlers for our generated code.
-  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
+  os::register_code_area((char*)low_bound(), (char*)high_bound());
 }
 
-
 void codeCache_init() {
   CodeCache::initialize();
 }
@@ -610,8 +845,9 @@
 
 void CodeCache::clear_inline_caches() {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_NMETHODS(nm) {
-    nm->clear_inline_caches();
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    iter.method()->clear_inline_caches();
   }
 }
 
@@ -666,7 +902,9 @@
     }
   }
 
-  FOR_ALL_ALIVE_NMETHODS(nm) {
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    nmethod* nm = iter.method();
     if (nm->is_marked_for_deoptimization()) {
       // ...Already marked in the previous pass; don't count it again.
     } else if (nm->is_evol_dependent_on(dependee())) {
@@ -687,19 +925,22 @@
 // Deoptimize all methods
 void CodeCache::mark_all_nmethods_for_deoptimization() {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  FOR_ALL_ALIVE_NMETHODS(nm) {
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    nmethod* nm = iter.method();
     if (!nm->method()->is_method_handle_intrinsic()) {
       nm->mark_for_deoptimization();
     }
   }
 }
 
-
 int CodeCache::mark_for_deoptimization(Method* dependee) {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   int number_of_marked_CodeBlobs = 0;
 
-  FOR_ALL_ALIVE_NMETHODS(nm) {
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    nmethod* nm = iter.method();
     if (nm->is_dependent_on_method(dependee)) {
       ResourceMark rm;
       nm->mark_for_deoptimization();
@@ -712,7 +953,9 @@
 
 void CodeCache::make_marked_nmethods_zombies() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
-  FOR_ALL_ALIVE_NMETHODS(nm) {
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    nmethod* nm = iter.method();
     if (nm->is_marked_for_deoptimization()) {
 
       // If the nmethod has already been made non-entrant and it can be converted
@@ -733,7 +976,9 @@
 
 void CodeCache::make_marked_nmethods_not_entrant() {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_NMETHODS(nm) {
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    nmethod* nm = iter.method();
     if (nm->is_marked_for_deoptimization()) {
       nm->make_not_entrant();
     }
@@ -741,23 +986,55 @@
 }
 
 void CodeCache::verify() {
-  _heap->verify();
-  FOR_ALL_ALIVE_BLOBS(p) {
-    p->verify();
+  assert_locked_or_safepoint(CodeCache_lock);
+  FOR_ALL_HEAPS(heap) {
+    (*heap)->verify();
+    FOR_ALL_BLOBS(cb, *heap) {
+      if (cb->is_alive()) {
+        cb->verify();
+      }
+    }
   }
 }
 
-void CodeCache::report_codemem_full() {
+// A CodeHeap is full. Print out a warning and report the event.
+void CodeCache::report_codemem_full(int code_blob_type, bool print) {
+  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
+  CodeHeap* heap = get_code_heap(SegmentedCodeCache ? code_blob_type : CodeBlobType::All);
+
+  if (!heap->was_full() || print) {
+    // Not yet reported for this heap, report
+    heap->report_full();
+    if (SegmentedCodeCache) {
+      warning("CodeHeap for %s is full. Compiler has been disabled.", CodeCache::get_code_heap_name(code_blob_type));
+      warning("Try increasing the code heap size using -XX:%s=",
+          (code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
+    } else {
+      warning("CodeCache is full. Compiler has been disabled.");
+      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
+    }
+    ResourceMark rm;
+    stringStream s;
+    // Dump the code cache into a buffer before locking the tty.
+    {
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      print_summary(&s);
+    }
+    ttyLocker ttyl;
+    tty->print("%s", s.as_string());
+  }
+
   _codemem_full_count++;
   EventCodeCacheFull event;
   if (event.should_commit()) {
-    event.set_startAddress((u8)low_bound());
-    event.set_commitedTopAddress((u8)high());
-    event.set_reservedTopAddress((u8)high_bound());
+    event.set_codeBlobType((u1)code_blob_type);
+    event.set_startAddress((u8)heap->low_boundary());
+    event.set_commitedTopAddress((u8)heap->high());
+    event.set_reservedTopAddress((u8)heap->high_boundary());
     event.set_entryCount(nof_blobs());
     event.set_methodCount(nof_nmethods());
     event.set_adaptorCount(nof_adapters());
-    event.set_unallocatedCapacity(unallocated_capacity()/K);
+    event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
     event.set_fullCount(_codemem_full_count);
     event.commit();
   }
@@ -765,15 +1042,17 @@
 
 void CodeCache::print_memory_overhead() {
   size_t wasted_bytes = 0;
-  CodeBlob *cb;
-  for (cb = first(); cb != NULL; cb = next(cb)) {
-    HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
-    wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
+  FOR_ALL_HEAPS(heap) {
+    CodeHeap* curr_heap = *heap;
+    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
+      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
+      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
+    }
   }
   // Print bytes that are allocated in the freelist
   ttyLocker ttl;
-  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,    freelist_length());
-  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelist()/K);
+  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
+  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
   tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
   tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
 }
@@ -808,43 +1087,48 @@
   int max_nm_size = 0;
   ResourceMark rm;
 
-  CodeBlob *cb;
-  for (cb = first(); cb != NULL; cb = next(cb)) {
-    total++;
-    if (cb->is_nmethod()) {
-      nmethod* nm = (nmethod*)cb;
+  int i = 0;
+  FOR_ALL_HEAPS(heap) {
+    if (SegmentedCodeCache && Verbose) {
+      tty->print_cr("-- Code heap '%s' --", (*heap)->name());
+    }
+    FOR_ALL_BLOBS(cb, *heap) {
+      total++;
+      if (cb->is_nmethod()) {
+        nmethod* nm = (nmethod*)cb;
 
-      if (Verbose && nm->method() != NULL) {
-        ResourceMark rm;
-        char *method_name = nm->method()->name_and_sig_as_C_string();
-        tty->print("%s", method_name);
-        if(nm->is_alive()) { tty->print_cr(" alive"); }
-        if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
-        if(nm->is_zombie()) { tty->print_cr(" zombie"); }
-      }
+        if (Verbose && nm->method() != NULL) {
+          ResourceMark rm;
+          char *method_name = nm->method()->name_and_sig_as_C_string();
+          tty->print("%s", method_name);
+          if(nm->is_alive()) { tty->print_cr(" alive"); }
+          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
+          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
+        }
 
-      nmethodCount++;
+        nmethodCount++;
 
-      if(nm->is_alive()) { nmethodAlive++; }
-      if(nm->is_not_entrant()) { nmethodNotEntrant++; }
-      if(nm->is_zombie()) { nmethodZombie++; }
-      if(nm->is_unloaded()) { nmethodUnloaded++; }
-      if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
+        if(nm->is_alive()) { nmethodAlive++; }
+        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
+        if(nm->is_zombie()) { nmethodZombie++; }
+        if(nm->is_unloaded()) { nmethodUnloaded++; }
+        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
 
-      if(nm->method() != NULL && nm->is_java_method()) {
-        nmethodJava++;
-        max_nm_size = MAX2(max_nm_size, nm->size());
+        if(nm->method() != NULL && nm->is_java_method()) {
+          nmethodJava++;
+          max_nm_size = MAX2(max_nm_size, nm->size());
+        }
+      } else if (cb->is_runtime_stub()) {
+        runtimeStubCount++;
+      } else if (cb->is_deoptimization_stub()) {
+        deoptimizationStubCount++;
+      } else if (cb->is_uncommon_trap_stub()) {
+        uncommonTrapStubCount++;
+      } else if (cb->is_adapter_blob()) {
+        adapterCount++;
+      } else if (cb->is_buffer_blob()) {
+        bufferBlobCount++;
       }
-    } else if (cb->is_runtime_stub()) {
-      runtimeStubCount++;
-    } else if (cb->is_deoptimization_stub()) {
-      deoptimizationStubCount++;
-    } else if (cb->is_uncommon_trap_stub()) {
-      uncommonTrapStubCount++;
-    } else if (cb->is_adapter_blob()) {
-      adapterCount++;
-    } else if (cb->is_buffer_blob()) {
-      bufferBlobCount++;
     }
   }
 
@@ -853,12 +1137,11 @@
   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
   memset(buckets, 0, sizeof(int) * bucketLimit);
 
-  for (cb = first(); cb != NULL; cb = next(cb)) {
-    if (cb->is_nmethod()) {
-      nmethod* nm = (nmethod*)cb;
-      if(nm->is_java_method()) {
-        buckets[nm->size() / bucketSize]++;
-       }
+  NMethodIterator iter;
+  while(iter.next()) {
+    nmethod* nm = iter.method();
+    if(nm->method() != NULL && nm->is_java_method()) {
+      buckets[nm->size() / bucketSize]++;
     }
   }
 
@@ -902,11 +1185,13 @@
   CodeBlob_sizes live;
   CodeBlob_sizes dead;
 
-  FOR_ALL_BLOBS(p) {
-    if (!p->is_alive()) {
-      dead.add(p);
-    } else {
-      live.add(p);
+  FOR_ALL_HEAPS(heap) {
+    FOR_ALL_BLOBS(cb, *heap) {
+      if (!cb->is_alive()) {
+        dead.add(cb);
+      } else {
+        live.add(cb);
+      }
     }
   }
 
@@ -920,21 +1205,22 @@
     dead.print("dead");
   }
 
-
   if (WizardMode) {
      // print the oop_map usage
     int code_size = 0;
     int number_of_blobs = 0;
     int number_of_oop_maps = 0;
     int map_size = 0;
-    FOR_ALL_BLOBS(p) {
-      if (p->is_alive()) {
-        number_of_blobs++;
-        code_size += p->code_size();
-        OopMapSet* set = p->oop_maps();
-        if (set != NULL) {
-          number_of_oop_maps += set->size();
-          map_size           += set->heap_size();
+    FOR_ALL_HEAPS(heap) {
+      FOR_ALL_BLOBS(cb, *heap) {
+        if (cb->is_alive()) {
+          number_of_blobs++;
+          code_size += cb->code_size();
+          OopMapSet* set = cb->oop_maps();
+          if (set != NULL) {
+            number_of_oop_maps += set->size();
+            map_size           += set->heap_size();
+          }
         }
       }
     }
@@ -949,20 +1235,31 @@
 }
 
 void CodeCache::print_summary(outputStream* st, bool detailed) {
-  size_t total = (_heap->high_boundary() - _heap->low_boundary());
-  st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
-               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
-               total/K, (total - unallocated_capacity())/K,
-               maxCodeCacheUsed/K, unallocated_capacity()/K);
+  FOR_ALL_HEAPS(heap_iterator) {
+    CodeHeap* heap = (*heap_iterator);
+    size_t total = (heap->high_boundary() - heap->low_boundary());
+    if (SegmentedCodeCache) {
+      st->print("CodeHeap '%s':", heap->name());
+    } else {
+      st->print("CodeCache:");
+    }
+    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
+                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
+                 total/K, (total - heap->unallocated_capacity())/K,
+                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
+
+    if (detailed) {
+      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
+                   p2i(heap->low_boundary()),
+                   p2i(heap->high()),
+                   p2i(heap->high_boundary()));
+    }
+  }
 
   if (detailed) {
-    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
-                 p2i(_heap->low_boundary()),
-                 p2i(_heap->high()),
-                 p2i(_heap->high_boundary()));
     st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
-                 " adapters=" UINT32_FORMAT,
-                 nof_blobs(), nof_nmethods(), nof_adapters());
+                       " adapters=" UINT32_FORMAT,
+                       nof_blobs(), nof_nmethods(), nof_adapters());
     st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                  "enabled" : Arguments::mode() == Arguments::_int ?
                  "disabled (interpreter mode)" :
@@ -973,12 +1270,14 @@
 void CodeCache::print_codelist(outputStream* st) {
   assert_locked_or_safepoint(CodeCache_lock);
 
-  FOR_ALL_NMETHODS(p) {
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    nmethod* nm = iter.method();
     ResourceMark rm;
-    char *method_name = p->method()->name_and_sig_as_C_string();
+    char *method_name = nm->method()->name_and_sig_as_C_string();
     st->print_cr("%d %d %s ["INTPTR_FORMAT", "INTPTR_FORMAT" - "INTPTR_FORMAT"]",
-                 p->compile_id(), p->comp_level(), method_name, (intptr_t)p->header_begin(),
-                 (intptr_t)p->code_begin(), (intptr_t)p->code_end());
+                 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(),
+                 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
   }
 }
 
@@ -995,4 +1294,3 @@
             nof_blobs(), nof_nmethods(), nof_adapters(),
             unallocated_capacity());
 }
-
--- a/hotspot/src/share/vm/code/codeCache.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -26,105 +26,117 @@
 #define SHARE_VM_CODE_CODECACHE_HPP
 
 #include "code/codeBlob.hpp"
+#include "code/nmethod.hpp"
 #include "memory/allocation.hpp"
 #include "memory/heap.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/oopsHierarchy.hpp"
+#include "runtime/mutexLocker.hpp"
 
 // The CodeCache implements the code cache for various pieces of generated
 // code, e.g., compiled java methods, runtime stubs, transition frames, etc.
 // The entries in the CodeCache are all CodeBlob's.
 
-// Implementation:
-//   - Each CodeBlob occupies one chunk of memory.
-//   - Like the offset table in oldspace the zone has at table for
-//     locating a method given a addess of an instruction.
+// -- Implementation --
+// The CodeCache consists of one or more CodeHeaps, each of which contains
+// CodeBlobs of a specific CodeBlobType. Currently heaps for the following
+// types are available:
+//  - Non-methods: non-method code like buffers, adapters and runtime stubs
+//  - Profiled nmethods: nmethods that are profiled, i.e., those
+//    executed at level 2 or 3
+//  - Non-Profiled nmethods: nmethods that are not profiled, i.e., those
+//    executed at level 1 or 4 and native methods
+//  - All: Used for code of all types if code cache segmentation is disabled.
+//
+// In the rare case of the non-method code heap getting full, non-method code
+// will be stored in the non-profiled code heap as a fallback solution.
+//
+// Depending on the availability of compilers and TieredCompilation there
+// may be fewer heaps. The size of the code heaps depends on the values of
+// ReservedCodeCacheSize, NonProfiledCodeHeapSize and ProfiledCodeHeapSize
+// (see CodeCache::heap_available(..) and CodeCache::initialize_heaps(..)
+// for details).
+//
+// Code cache segmentation is controlled by the flag SegmentedCodeCache.
+// If turned off, all code types are stored in a single code heap. By default
+// code cache segmentation is turned on if TieredCompilation is enabled and
+// ReservedCodeCacheSize >= 240 MB.
+//
+// All methods of the CodeCache accepting a CodeBlobType only apply to
+// CodeBlobs of the given type. For example, iteration over the
+// CodeBlobs of a specific type can be done by using CodeCache::first_blob(..)
+// and CodeCache::next_blob(..) and providing the corresponding CodeBlobType.
+//
+// IMPORTANT: If you add new CodeHeaps to the code cache or change the
+// existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
+// Solaris and BSD.
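As a hedged illustration of the per-type accessors mentioned above (this sketch is not part of the changeset; print_code_heap_usage is a hypothetical helper that assumes the CodeBlobType constants introduced by this change and a fully initialized, segmented code cache):

static void print_code_heap_usage(outputStream* st) {
  // Used bytes per code heap = capacity minus unallocated capacity.
  st->print_cr("non-method used:   " SIZE_FORMAT "K",
               (CodeCache::capacity(CodeBlobType::NonMethod)
                - CodeCache::unallocated_capacity(CodeBlobType::NonMethod)) / K);
  st->print_cr("profiled used:     " SIZE_FORMAT "K",
               (CodeCache::capacity(CodeBlobType::MethodProfiled)
                - CodeCache::unallocated_capacity(CodeBlobType::MethodProfiled)) / K);
  st->print_cr("non-profiled used: " SIZE_FORMAT "K",
               (CodeCache::capacity(CodeBlobType::MethodNonProfiled)
                - CodeCache::unallocated_capacity(CodeBlobType::MethodNonProfiled)) / K);
}

Both capacity(int) and unallocated_capacity(int) are public accessors declared in the class below and return 0 for heaps that are not available.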
 
 class OopClosure;
 class DepChange;
 
 class CodeCache : AllStatic {
   friend class VMStructs;
+  friend class NMethodIterator;
  private:
-  // CodeHeap is malloc()'ed at startup and never deleted during shutdown,
-  // so that the generated assembly code is always there when it's needed.
-  // This may cause memory leak, but is necessary, for now. See 4423824,
-  // 4422213 or 4436291 for details.
-  static CodeHeap * _heap;
-  static int _number_of_blobs;
-  static int _number_of_adapters;
-  static int _number_of_nmethods;
-  static int _number_of_nmethods_with_dependencies;
-  static bool _needs_cache_clean;
-  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
+  // CodeHeaps of the cache
+  static GrowableArray<CodeHeap*>* _heaps;
+
+  static address _low_bound;                            // Lower bound of CodeHeap addresses
+  static address _high_bound;                           // Upper bound of CodeHeap addresses
+  static int _number_of_blobs;                          // Total number of CodeBlobs in the cache
+  static int _number_of_adapters;                       // Total number of Adapters in the cache
+  static int _number_of_nmethods;                       // Total number of nmethods in the cache
+  static int _number_of_nmethods_with_dependencies;     // Total number of nmethods with dependencies
+  static bool _needs_cache_clean;                       // True if inline caches of the nmethods need to be flushed
+  static nmethod* _scavenge_root_nmethods;              // linked via nm->scavenge_root_link()
+  static int _codemem_full_count;                       // Number of times a CodeHeap in the cache was full
 
   static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
   static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
 
-  static int _codemem_full_count;
-  static size_t bytes_allocated_in_freelist() { return _heap->allocated_in_freelist(); }
-  static int    allocated_segments()          { return _heap->allocated_segments(); }
-  static size_t freelist_length()             { return _heap->freelist_length(); }
+  // CodeHeap management
+  static void initialize_heaps();                             // Initializes the CodeHeaps
+  // Creates a new heap with the given name and size, containing CodeBlobs of the given type
+  static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
+  static CodeHeap* get_code_heap(CodeBlob* cb);               // Returns the CodeHeap for the given CodeBlob
+  static CodeHeap* get_code_heap(int code_blob_type);         // Returns the CodeHeap for the given CodeBlobType
+  static bool heap_available(int code_blob_type);             // Returns true if a CodeHeap for the given CodeBlobType is available
+  static ReservedCodeSpace reserve_heap_memory(size_t size);  // Reserves one contiguous chunk of memory for the CodeHeaps
+
+  // Iteration
+  static CodeBlob* first_blob(CodeHeap* heap);                // Returns the first CodeBlob on the given CodeHeap
+  static CodeBlob* first_blob(int code_blob_type);            // Returns the first CodeBlob of the given type
+  static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb);   // Returns the next CodeBlob on the given CodeHeap
+  static CodeBlob* next_blob(CodeBlob* cb);                   // Returns the CodeBlob following the given one in its CodeHeap
+
+  static size_t bytes_allocated_in_freelists();
+  static int    allocated_segments();
+  static size_t freelists_length();
 
  public:
-
   // Initialization
   static void initialize();
 
-  static void report_codemem_full();
-
   // Allocation/administration
-  static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
-  static void commit(CodeBlob* cb);                 // called when the allocated CodeBlob has been filled
-  static int alignment_unit();                      // guaranteed alignment of all CodeBlobs
-  static int alignment_offset();                    // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
-  static void free(CodeBlob* cb);                   // frees a CodeBlob
-  static bool contains(void *p);                    // returns whether p is included
-  static void blobs_do(void f(CodeBlob* cb));       // iterates over all CodeBlobs
-  static void blobs_do(CodeBlobClosure* f);         // iterates over all CodeBlobs
-  static void nmethods_do(void f(nmethod* nm));     // iterates over all nmethods
-  static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
+  static CodeBlob* allocate(int size, int code_blob_type, bool is_critical = false); // allocates a new CodeBlob
+  static void commit(CodeBlob* cb);                     // called when the allocated CodeBlob has been filled
+  static int  alignment_unit();                         // guaranteed alignment of all CodeBlobs
+  static int  alignment_offset();                       // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
+  static void free(CodeBlob* cb);                       // frees a CodeBlob
+  static bool contains(void *p);                        // returns whether p is included
+  static void blobs_do(void f(CodeBlob* cb));           // iterates over all CodeBlobs
+  static void blobs_do(CodeBlobClosure* f);             // iterates over all CodeBlobs
+  static void nmethods_do(void f(nmethod* nm));         // iterates over all nmethods
+  static void alive_nmethods_do(void f(nmethod* nm));   // iterates over all alive nmethods
 
   // Lookup
-  static CodeBlob* find_blob(void* start);
-  static nmethod*  find_nmethod(void* start);
-
-  // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
-  // what you are doing)
-  static CodeBlob* find_blob_unsafe(void* start) {
-    // NMT can walk the stack before code cache is created
-    if (_heap == NULL) return NULL;
+  static CodeBlob* find_blob(void* start);              // Returns the CodeBlob containing the given address
+  static CodeBlob* find_blob_unsafe(void* start);       // Same as find_blob but does not fail if looking up a zombie method
+  static nmethod*  find_nmethod(void* start);           // Returns the nmethod containing the given address
 
-    CodeBlob* result = (CodeBlob*)_heap->find_start(start);
-    // this assert is too strong because the heap code will return the
-    // heapblock containing start. That block can often be larger than
-    // the codeBlob itself. If you look up an address that is within
-    // the heapblock but not in the codeBlob you will assert.
-    //
-    // Most things will not lookup such bad addresses. However
-    // AsyncGetCallTrace can see intermediate frames and get that kind
-    // of invalid address and so can a developer using hsfind.
-    //
-    // The more correct answer is to return NULL if blob_contains() returns
-    // false.
-    // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");
-
-    if (result != NULL && !result->blob_contains((address)start)) {
-      result = NULL;
-    }
-    return result;
-  }
-
-  // Iteration
-  static CodeBlob* first();
-  static CodeBlob* next (CodeBlob* cb);
-  static CodeBlob* alive(CodeBlob *cb);
-  static nmethod* alive_nmethod(CodeBlob *cb);
-  static nmethod* first_nmethod();
-  static nmethod* next_nmethod (CodeBlob* cb);
-  static int       nof_blobs()                 { return _number_of_blobs; }
-  static int       nof_adapters()              { return _number_of_adapters; }
-  static int       nof_nmethods()              { return _number_of_nmethods; }
+  static int       nof_blobs()      { return _number_of_blobs; }      // Returns the total number of CodeBlobs in the cache
+  static int       nof_adapters()   { return _number_of_adapters; }   // Returns the total number of Adapters in the cache
+  static int       nof_nmethods()   { return _number_of_nmethods; }   // Returns the total number of nmethods in the cache
 
   // GC support
   static void gc_epilogue();
@@ -137,7 +149,7 @@
   static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
   static void scavenge_root_nmethods_do(CodeBlobClosure* f);
 
-  static nmethod* scavenge_root_nmethods()          { return _scavenge_root_nmethods; }
+  static nmethod* scavenge_root_nmethods()            { return _scavenge_root_nmethods; }
   static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
   static void add_scavenge_root_nmethod(nmethod* nm);
   static void drop_scavenge_root_nmethod(nmethod* nm);
@@ -151,27 +163,47 @@
   static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
   static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
   static void log_state(outputStream* st);
+  static const char* get_code_heap_name(int code_blob_type)  { return (heap_available(code_blob_type) ? get_code_heap(code_blob_type)->name() : "Unused"); }
+  static void report_codemem_full(int code_blob_type, bool print);
 
   // Dcmd (Diagnostic commands)
   static void print_codelist(outputStream* st);
   static void print_layout(outputStream* st);
 
   // The full limits of the codeCache
-  static address  low_bound()                    { return (address) _heap->low_boundary(); }
-  static address  high_bound()                   { return (address) _heap->high_boundary(); }
-  static address  high()                         { return (address) _heap->high(); }
+  static address low_bound()                          { return _low_bound; }
+  static address high_bound()                         { return _high_bound; }
 
   // Profiling
-  static address first_address();                // first address used for CodeBlobs
-  static address last_address();                 // last  address used for CodeBlobs
-  static size_t  capacity()                      { return _heap->capacity(); }
-  static size_t  max_capacity()                  { return _heap->max_capacity(); }
-  static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
-  static double  reverse_free_ratio();
+  static size_t capacity(int code_blob_type)             { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->capacity() : 0; }
+  static size_t capacity();
+  static size_t unallocated_capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->unallocated_capacity() : 0; }
+  static size_t unallocated_capacity();
+  static size_t max_capacity(int code_blob_type)         { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->max_capacity() : 0; }
+  static size_t max_capacity();
+
+  static bool   is_full(int* code_blob_type);
+  static double reverse_free_ratio(int code_blob_type);
+
+  static bool needs_cache_clean()                     { return _needs_cache_clean; }
+  static void set_needs_cache_clean(bool v)           { _needs_cache_clean = v;    }
+  static void clear_inline_caches();                  // clear all inline caches
 
-  static bool needs_cache_clean()                { return _needs_cache_clean; }
-  static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
-  static void clear_inline_caches();             // clear all inline caches
+  // Returns the CodeBlobType for nmethods of the given compilation level
+  static int get_code_blob_type(int comp_level) {
+    if (comp_level == CompLevel_none ||
+        comp_level == CompLevel_simple ||
+        comp_level == CompLevel_full_optimization) {
+      // Non profiled methods
+      return CodeBlobType::MethodNonProfiled;
+    } else if (comp_level == CompLevel_limited_profile ||
+               comp_level == CompLevel_full_profile) {
+      // Profiled methods
+      return CodeBlobType::MethodProfiled;
+    }
+    ShouldNotReachHere();
+    return 0;
+  }
 
   static void verify_clean_inline_caches();
   static void verify_icholder_relocations();
@@ -187,10 +219,87 @@
   static void make_marked_nmethods_zombies();
   static void make_marked_nmethods_not_entrant();
 
-    // tells how many nmethods have dependencies
+  // tells how many nmethods have dependencies
   static int number_of_nmethods_with_dependencies();
 
   static int get_codemem_full_count() { return _codemem_full_count; }
 };
 
+
+// Iterator to iterate over nmethods in the CodeCache.
+class NMethodIterator : public StackObj {
+ private:
+  CodeBlob* _code_blob;   // Current CodeBlob
+  int _code_blob_type;    // Refers to current CodeHeap
+
+ public:
+  NMethodIterator() {
+    initialize(NULL); // Set to NULL, initialized by first call to next()
+  }
+
+  NMethodIterator(nmethod* nm) {
+    initialize(nm);
+  }
+
+  // Advance iterator to next nmethod
+  bool next() {
+    assert_locked_or_safepoint(CodeCache_lock);
+    assert(_code_blob_type < CodeBlobType::NumTypes, "end reached");
+
+    bool result = next_nmethod();
+    while (!result && (_code_blob_type < CodeBlobType::MethodProfiled)) {
+      // Advance to next code heap if segmented code cache
+      _code_blob_type++;
+      result = next_nmethod();
+    }
+    return result;
+  }
+
+  // Advance iterator to next alive nmethod
+  bool next_alive() {
+    bool result = next();
+    while(result && !_code_blob->is_alive()) {
+      result = next();
+    }
+    return result;
+  }
+
+  bool end()        const   { return _code_blob == NULL; }
+  nmethod* method() const   { return (nmethod*)_code_blob; }
+
+ private:
+  // Initialize iterator to given nmethod
+  void initialize(nmethod* nm) {
+    _code_blob = (CodeBlob*)nm;
+    if (!SegmentedCodeCache) {
+      // Iterate over all CodeBlobs
+      _code_blob_type = CodeBlobType::All;
+    } else if (nm != NULL) {
+      _code_blob_type = CodeCache::get_code_blob_type(nm->comp_level());
+    } else {
+      // Only iterate over method code heaps, starting with non-profiled
+      _code_blob_type = CodeBlobType::MethodNonProfiled;
+    }
+  }
+
+  // Advance iterator to the next nmethod in the current code heap
+  bool next_nmethod() {
+    // Get first method CodeBlob
+    if (_code_blob == NULL) {
+      _code_blob = CodeCache::first_blob(_code_blob_type);
+      if (_code_blob == NULL) {
+        return false;
+      } else if (_code_blob->is_nmethod()) {
+        return true;
+      }
+    }
+    // Search for next method CodeBlob
+    _code_blob = CodeCache::next_blob(_code_blob);
+    while (_code_blob != NULL && !_code_blob->is_nmethod()) {
+      _code_blob = CodeCache::next_blob(_code_blob);
+    }
+    return _code_blob != NULL;
+  }
+};
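A minimal usage sketch for the iterator above, mirroring the loops added to codeCache.cpp earlier in this changeset; log_alive_nmethods is a hypothetical helper, and the caller must hold the CodeCache_lock (or be at a safepoint), as next() asserts:

static void log_alive_nmethods() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    tty->print_cr("nmethod %d at compilation level %d", nm->compile_id(), nm->comp_level());
  }
}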
+
 #endif // SHARE_VM_CODE_CODECACHE_HPP
--- a/hotspot/src/share/vm/code/nmethod.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -500,7 +500,7 @@
     CodeOffsets offsets;
     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
-    nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
+    nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
                                             compile_id, &offsets,
                                             code_buffer, frame_size,
                                             basic_lock_owner_sp_offset,
@@ -538,7 +538,7 @@
     offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 
-    nm = new (nmethod_size) nmethod(method(), nmethod_size,
+    nm = new (nmethod_size, CompLevel_none) nmethod(method(), nmethod_size,
                                     &offsets, code_buffer, frame_size);
 
     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
@@ -586,7 +586,7 @@
       + round_to(nul_chk_table->size_in_bytes(), oopSize)
       + round_to(debug_info->data_size()       , oopSize);
 
-    nm = new (nmethod_size)
+    nm = new (nmethod_size, comp_level)
     nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
             orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
             oop_maps,
@@ -803,9 +803,11 @@
 }
 #endif // def HAVE_DTRACE_H
 
-void* nmethod::operator new(size_t size, int nmethod_size) throw() {
-  // Not critical, may return null if there is too little continuous memory
-  return CodeCache::allocate(nmethod_size);
+void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
+  // With a SegmentedCodeCache, nmethods are allocated on separate heaps and therefore do not share memory
+  // with critical CodeBlobs. We define the allocation as critical to make sure all code heap memory is used.
+  bool is_critical = SegmentedCodeCache;
+  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), is_critical);
 }
 
 nmethod::nmethod(
@@ -1530,7 +1532,7 @@
   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
   if (PrintMethodFlushing) {
     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
-        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
+        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
   }
 
   // We need to deallocate any ExceptionCache data.
@@ -1557,7 +1559,6 @@
   CodeCache::free(this);
 }
 
-
 //
 // Notify all classes this nmethod is dependent on that it is no
 // longer dependent. This should only be called in two situations.
@@ -2418,15 +2419,18 @@
   // Turn off dependency tracing while actually testing dependencies.
   NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
 
- typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
-                           &DependencySignature::equals, 11027> DepTable;
-
- DepTable* table = new DepTable();
+  typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
+                            &DependencySignature::equals, 11027> DepTable;
+
+  DepTable* table = new DepTable();
 
   // Iterate over live nmethods and check dependencies of all nmethods that are not
   // marked for deoptimization. A particular dependency is only checked once.
-  for(nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); nm != NULL; nm = CodeCache::alive_nmethod(CodeCache::next(nm))) {
-    if (!nm->is_marked_for_deoptimization()) {
+  NMethodIterator iter;
+  while(iter.next()) {
+    nmethod* nm = iter.method();
+    // Only notify for live nmethods
+    if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
       for (Dependencies::DepStream deps(nm); deps.next(); ) {
         // Construct abstraction of a dependency.
         DependencySignature* current_sig = new DependencySignature(deps);
--- a/hotspot/src/share/vm/code/nmethod.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -288,7 +288,7 @@
           int comp_level);
 
   // helper methods
-  void* operator new(size_t size, int nmethod_size) throw();
+  void* operator new(size_t size, int nmethod_size, int comp_level) throw();
 
   const char* reloc_string_for(u_char* begin, u_char* end);
   // Returns true if this thread changed the state of the nmethod or
--- a/hotspot/src/share/vm/code/vtableStubs.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -63,7 +63,7 @@
    // If changing the name, update the other file accordingly.
     BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
-      CompileBroker::handle_full_code_cache();
+      CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
       return NULL;
     }
     _chunk = blob->content_begin();
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -783,18 +783,22 @@
 
 
 void CompileBroker::print_compile_queues(outputStream* st) {
-  _c1_compile_queue->print(st);
-  _c2_compile_queue->print(st);
+  MutexLocker locker(MethodCompileQueue_lock);
+  if (_c1_compile_queue != NULL) {
+    _c1_compile_queue->print(st);
+  }
+  if (_c2_compile_queue != NULL) {
+    _c2_compile_queue->print(st);
+  }
 }
 
-
 void CompileQueue::print(outputStream* st) {
-  assert_locked_or_safepoint(lock());
+  assert(lock()->owned_by_self(), "must own lock");
   st->print_cr("Contents of %s", name());
   st->print_cr("----------------------------");
   CompileTask* task = _first;
   if (task == NULL) {
-    st->print_cr("Empty");;
+    st->print_cr("Empty");
   } else {
     while (task != NULL) {
       task->print_compilation(st, NULL, true, true);
@@ -1206,6 +1210,12 @@
     return;
   }
 
+  if (TieredCompilation) {
+    // Tiered policy requires MethodCounters to exist before adding a method to
+    // the queue. Create if we don't have them yet.
+    method->get_method_counters(thread);
+  }
+
   // Outputs from the following MutexLocker block:
   CompileTask* task     = NULL;
   bool         blocking = false;
@@ -1747,9 +1757,11 @@
     // We need this HandleMark to avoid leaking VM handles.
     HandleMark hm(thread);
 
-    if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
-      // the code cache is really full
-      handle_full_code_cache();
+    // Check if the CodeCache is full
+    int code_blob_type = 0;
+    if (CodeCache::is_full(&code_blob_type)) {
+      // The CodeHeap for code_blob_type is really full
+      handle_full_code_cache(code_blob_type);
     }
 
     CompileTask* task = queue->get();
@@ -1777,22 +1789,6 @@
     if (method()->number_of_breakpoints() == 0) {
       // Compile the method.
       if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
-#ifdef COMPILER1
-        // Allow repeating compilations for the purpose of benchmarking
-        // compile speed. This is not useful for customers.
-        if (CompilationRepeat != 0) {
-          int compile_count = CompilationRepeat;
-          while (compile_count > 0) {
-            invoke_compiler_on_method(task);
-            nmethod* nm = method->code();
-            if (nm != NULL) {
-              nm->make_zombie();
-              method->clear_code();
-            }
-            compile_count--;
-          }
-        }
-#endif /* COMPILER1 */
         invoke_compiler_on_method(task);
       } else {
         // After compilation is disabled, remove remaining methods from queue
@@ -2079,7 +2075,7 @@
  * The CodeCache is full.  Print out warning and disable compilation
  * or try code cache cleaning so compilation can continue later.
  */
-void CompileBroker::handle_full_code_cache() {
+void CompileBroker::handle_full_code_cache(int code_blob_type) {
   UseInterpreter = true;
   if (UseCompiler || AlwaysCompileLoopMethods ) {
     if (xtty != NULL) {
@@ -2096,8 +2092,6 @@
       xtty->end_elem();
     }
 
-    CodeCache::report_codemem_full();
-
 #ifndef PRODUCT
     if (CompileTheWorld || ExitOnFullCodeCache) {
       codecache_print(/* detailed= */ true);
@@ -2119,12 +2113,7 @@
       disable_compilation_forever();
     }
 
-    // Print warning only once
-    if (should_print_compiler_warning()) {
-      warning("CodeCache is full. Compiler has been disabled.");
-      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
-      codecache_print(/* detailed= */ true);
-    }
+    CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning());
   }
 }
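
The compiler-thread loop above now asks CodeCache::is_full() which code heap, if any, is exhausted and passes that type to handle_full_code_cache(), instead of comparing a single unallocated_capacity() against CodeCacheMinimumFreeSpace. A simplified sketch of that per-heap check; all names here are stand-ins, not the HotSpot API.

#include <cstddef>

enum HeapKind { NonMethodHeap, ProfiledHeap, NonProfiledHeap, HeapKindCount };

struct SegmentedCache {
  size_t free_bytes[HeapKindCount];
  size_t minimum_free[HeapKindCount];

  // Returns true if some heap is below its free-space threshold and reports
  // which one through the out-parameter, mirroring is_full(&code_blob_type).
  bool is_full(int* which) const {
    for (int k = 0; k < HeapKindCount; ++k) {
      if (free_bytes[k] < minimum_free[k]) { *which = k; return true; }
    }
    return false;
  }
};

void compiler_loop_iteration(const SegmentedCache& cache) {
  int kind = 0;
  if (cache.is_full(&kind)) {
    // handle_full_code_cache(kind): disable compilation or sweep, per heap.
  }
}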
 
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -434,7 +434,7 @@
   static bool is_compilation_disabled_forever() {
     return _should_compile_new_jobs == shutdown_compilaton;
   }
-  static void handle_full_code_cache();
+  static void handle_full_code_cache(int code_blob_type);
   // Ensures that warning is only printed once.
   static bool should_print_compiler_warning() {
     jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0);
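
should_print_compiler_warning() in the context above uses a compare-and-swap from 0 to 1 so the code-cache-full warning is printed by exactly one thread. A standalone sketch of the same idiom using std::atomic in place of HotSpot's Atomic class:

#include <atomic>

static std::atomic<int> g_warning_printed{0};

// Only the thread that flips the flag from 0 to 1 gets to print.
static bool should_print_warning_once() {
  int expected = 0;
  return g_warning_printed.compare_exchange_strong(expected, 1);
}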
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -4167,7 +4167,7 @@
 // been published), so we do not need to check for
 // uninitialized objects before pushing here.
 void Par_ConcMarkingClosure::do_oop(oop obj) {
-  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
@@ -7226,7 +7226,7 @@
 // isMarked() query is "safe".
 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
   // Ignore mark word because we are running concurrent with mutators
-  assert(p->is_oop_or_null(true), "expected an oop or null");
+  assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
   HeapWord* addr = (HeapWord*)p;
   assert(_span.contains(addr), "we are scanning the CMS generation");
   bool is_obj_array = false;
@@ -7666,7 +7666,7 @@
 }
 
 void PushAndMarkVerifyClosure::do_oop(oop obj) {
-  assert(obj->is_oop_or_null(), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
@@ -7764,7 +7764,7 @@
 
 void PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
-  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
@@ -7802,7 +7802,7 @@
 
 void Par_PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
-  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
@@ -7879,7 +7879,7 @@
   // path and may be at the end of the global overflow list (so
   // the mark word may be NULL).
   assert(obj->is_oop_or_null(true /* ignore mark word */),
-         "expected an oop or NULL");
+         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
@@ -7959,7 +7959,7 @@
   // the debugger, is_oop_or_null(false) may subsequently start
   // to hold.
   assert(obj->is_oop_or_null(true),
-         "expected an oop or NULL");
+         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
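
The assertions above are upgraded from a fixed string to an err_msg that embeds the failing pointer, so a crash log shows which address tripped the check. A minimal sketch of the same idea, with snprintf standing in for err_msg and PTR_FORMAT/p2i:

#include <cassert>
#include <cstdint>
#include <cstdio>

static void check_oop_or_null(const void* obj, bool looks_like_oop) {
  if (!(obj == nullptr || looks_like_oop)) {
    char msg[64];
    std::snprintf(msg, sizeof(msg), "Expected an oop or NULL at 0x%016llx",
                  (unsigned long long)(uintptr_t)obj);
    std::fputs(msg, stderr);
    assert(false && "Expected an oop or NULL");
  }
}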
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -73,7 +73,7 @@
     } else {
       res = (PromotedObject*)(_next & next_mask);
     }
-    assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Not an oop?");
+    assert(oop(res)->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res))));
     return res;
   }
   inline void setNext(PromotedObject* x) {
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -107,7 +107,7 @@
     HeapRegion *curr = regions_at(index++);
     guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
     guarantee(!curr->is_young(), "should not be young!");
-    guarantee(!curr->isHumongous(), "should not be humongous!");
+    guarantee(!curr->is_humongous(), "should not be humongous!");
     if (prev != NULL) {
       guarantee(order_regions(prev, curr) != 1,
                 err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
@@ -149,7 +149,7 @@
 
 
 void CollectionSetChooser::add_region(HeapRegion* hr) {
-  assert(!hr->isHumongous(),
+  assert(!hr->is_humongous(),
          "Humongous regions shouldn't be added to the collection set");
   assert(!hr->is_young(), "should not be young!");
   _regions.append(hr);
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -109,7 +109,7 @@
   bool should_add(HeapRegion* hr) {
     assert(hr->is_marked(), "pre-condition");
     assert(!hr->is_young(), "should never consider young regions");
-    return !hr->isHumongous() &&
+    return !hr->is_humongous() &&
             hr->live_bytes() < _region_live_threshold_bytes;
   }
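
should_add() above admits a region into the collection-set candidates only if it is non-young, non-humongous, and its live data is below the threshold. A standalone sketch of that predicate; the field names are illustrative.

#include <cstddef>

struct Region {
  bool   young;
  bool   humongous;
  size_t live_bytes;
};

// Candidate filter in the spirit of CollectionSetChooser::should_add():
// humongous regions are never moved, and mostly-live regions are not worth
// evacuating.
static bool should_add(const Region& r, size_t live_threshold_bytes) {
  return !r.young && !r.humongous && r.live_bytes < live_threshold_bytes;
}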
 
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -910,7 +910,7 @@
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->continuesHumongous()) {
+    if (!r->is_continues_humongous()) {
       r->note_start_of_marking();
     }
     return false;
@@ -1288,6 +1288,22 @@
   print_stats();
 }
 
+// Helper class to get rid of some boilerplate code.
+class G1CMTraceTime : public GCTraceTime {
+  static bool doit_and_prepend(bool doit) {
+    if (doit) {
+      gclog_or_tty->put(' ');
+    }
+    return doit;
+  }
+
+ public:
+  G1CMTraceTime(const char* title, bool doit)
+    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
+        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
+  }
+};
+
 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
@@ -1341,9 +1357,13 @@
     // marking due to overflowing the global mark stack.
     reset_marking_state();
   } else {
-    // Aggregate the per-task counting data that we have accumulated
-    // while marking.
-    aggregate_count_data();
+    {
+      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
+
+      // Aggregate the per-task counting data that we have accumulated
+      // while marking.
+      aggregate_count_data();
+    }
 
     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
     // We're done with marking.
@@ -1398,10 +1418,10 @@
   // to 1 the bits on the region bitmap that correspond to its
   // associated "continues humongous" regions.
   void set_bit_for_region(HeapRegion* hr) {
-    assert(!hr->continuesHumongous(), "should have filtered those out");
+    assert(!hr->is_continues_humongous(), "should have filtered those out");
 
     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
-    if (!hr->startsHumongous()) {
+    if (!hr->is_starts_humongous()) {
       // Normal (non-humongous) case: just set the bit.
       _region_bm->par_at_put(index, true);
     } else {
@@ -1434,7 +1454,7 @@
 
   bool doHeapRegion(HeapRegion* hr) {
 
-    if (hr->continuesHumongous()) {
+    if (hr->is_continues_humongous()) {
       // We will ignore these here and process them when their
       // associated "starts humongous" region is processed (see
       // set_bit_for_heap_region()). Note that we cannot rely on their
@@ -1556,7 +1576,7 @@
   int failures() const { return _failures; }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->continuesHumongous()) {
+    if (hr->is_continues_humongous()) {
       // We will ignore these here and process them when their
       // associated "starts humongous" region is processed (see
       // set_bit_for_heap_region()). Note that we cannot rely on their
@@ -1731,7 +1751,7 @@
 
   bool doHeapRegion(HeapRegion* hr) {
 
-    if (hr->continuesHumongous()) {
+    if (hr->is_continues_humongous()) {
       // We will ignore these here and process them when their
       // associated "starts humongous" region is processed (see
       // set_bit_for_heap_region()). Note that we cannot rely on their
@@ -1861,7 +1881,7 @@
   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
 
   bool doHeapRegion(HeapRegion *hr) {
-    if (hr->continuesHumongous()) {
+    if (hr->is_continues_humongous()) {
       return false;
     }
     // We use a claim value of zero here because all regions
@@ -1875,8 +1895,8 @@
     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
       _freed_bytes += hr->used();
       hr->set_containing_set(NULL);
-      if (hr->isHumongous()) {
-        assert(hr->startsHumongous(), "we should only see starts humongous");
+      if (hr->is_humongous()) {
+        assert(hr->is_starts_humongous(), "we should only see starts humongous");
         _humongous_regions_removed.increment(1u, hr->capacity());
         _g1->free_humongous_region(hr, _local_cleanup_list, true);
       } else {
@@ -2466,22 +2486,6 @@
   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
 }
 
-// Helper class to get rid of some boilerplate code.
-class G1RemarkGCTraceTime : public GCTraceTime {
-  static bool doit_and_prepend(bool doit) {
-    if (doit) {
-      gclog_or_tty->put(' ');
-    }
-    return doit;
-  }
-
- public:
-  G1RemarkGCTraceTime(const char* title, bool doit)
-    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
-        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
-  }
-};
-
 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   if (has_overflown()) {
     // Skip processing the discovered references if we have
@@ -2504,10 +2508,7 @@
   // Inner scope to exclude the cleaning of the string and symbol
   // tables from the displayed time.
   {
-    if (G1Log::finer()) {
-      gclog_or_tty->put(' ');
-    }
-    GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());
+    G1CMTraceTime t("GC ref-proc", G1Log::finer());
 
     ReferenceProcessor* rp = g1h->ref_processor_cm();
 
@@ -2598,24 +2599,24 @@
 
   // Unload Klasses, String, Symbols, Code Cache, etc.
   {
-    G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
+    G1CMTraceTime trace("Unloading", G1Log::finer());
 
     if (ClassUnloadingWithConcurrentMark) {
       bool purged_classes;
 
       {
-        G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
+        G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
         purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
       }
 
       {
-        G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
+        G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
       }
     }
 
     if (G1StringDedup::is_enabled()) {
-      G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
+      G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
       G1StringDedup::unlink(&g1_is_alive);
     }
   }
@@ -2719,7 +2720,7 @@
   HandleMark   hm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-  G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
+  G1CMTraceTime trace("Finalize Marking", G1Log::finer());
 
   g1h->ensure_parsability(false);
 
@@ -3191,7 +3192,7 @@
     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->continuesHumongous()) {
+    if (hr->is_continues_humongous()) {
       // We will ignore these here and process them when their
       // associated "starts humongous" region is processed.
       // Note that we cannot rely on their associated
@@ -3334,6 +3335,7 @@
   } else {
     g1_par_agg_task.work(0);
   }
+  _g1h->allocation_context_stats().update_at_remark();
 }
 
 // Clear the per-worker arrays used to store the per-region counting data
@@ -3562,7 +3564,7 @@
 void CMTask::setup_for_region(HeapRegion* hr) {
   assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
-  assert(!hr->continuesHumongous(),
+  assert(!hr->is_continues_humongous(),
         "claim_region() should have filtered out continues humongous regions");
 
   if (_cm->verbose_low()) {
@@ -4287,7 +4289,7 @@
                                HR_FORMAT_PARAMS(_curr_region));
       }
 
-      assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
+      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
              "humongous regions should go around loop once only");
 
       // Some special cases:
@@ -4301,7 +4303,7 @@
       if (mr.is_empty()) {
         giveup_current_region();
         regular_clock_call();
-      } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
+      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
         if (_nextMarkBitMap->isMarked(mr.start())) {
           // The object is marked - apply the closure
           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
@@ -4748,7 +4750,7 @@
   size_t remset_bytes    = r->rem_set()->mem_size();
   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
 
-  if (r->startsHumongous()) {
+  if (r->is_starts_humongous()) {
     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
            "they should have been zeroed after the last time we used them");
@@ -4760,7 +4762,7 @@
     get_hum_bytes(&used_bytes, &capacity_bytes,
                   &prev_live_bytes, &next_live_bytes);
     end = bottom + HeapRegion::GrainWords;
-  } else if (r->continuesHumongous()) {
+  } else if (r->is_continues_humongous()) {
     get_hum_bytes(&used_bytes, &capacity_bytes,
                   &prev_live_bytes, &next_live_bytes);
     assert(end == bottom + HeapRegion::GrainWords, "invariant");
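
G1CMTraceTime above generalizes the old G1RemarkGCTraceTime helper so all concurrent-mark phases (ref-proc, aggregate-data, unloading, finalize marking) share one scoped, conditionally enabled timer. The sketch below shows the same RAII shape using <chrono> instead of HotSpot's GCTraceTime machinery; it is an illustration, not the real class.

#include <chrono>
#include <cstdio>

class ScopedTrace {
  bool _enabled;
  const char* _title;
  std::chrono::steady_clock::time_point _start;
public:
  ScopedTrace(const char* title, bool enabled)
    : _enabled(enabled), _title(title), _start(std::chrono::steady_clock::now()) {
    if (_enabled) std::printf(" [%s", _title);   // leading space, as doit_and_prepend() does
  }
  ~ScopedTrace() {
    if (_enabled) {
      double secs = std::chrono::duration<double>(std::chrono::steady_clock::now() - _start).count();
      std::printf(", %.7f secs]\n", secs);
    }
  }
};

// Usage, mirroring the call sites above:
//   { ScopedTrace t("GC ref-proc", verbose_logging); /* ... phase work ... */ }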
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -88,7 +88,7 @@
   size_t region_size_bytes = mr.byte_size();
   uint index = hr->hrm_index();
 
-  assert(!hr->continuesHumongous(), "should not be HC region");
+  assert(!hr->is_continues_humongous(), "should not be HC region");
   assert(hr == g1h->heap_region_containing(start), "sanity");
   assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
   assert(marked_bytes_array != NULL, "pre-condition");
@@ -277,7 +277,7 @@
   ++_refs_reached;
 
   HeapWord* objAddr = (HeapWord*) obj;
-  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
+  assert(obj->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   if (_g1h->is_in_g1_reserved(objAddr)) {
     assert(obj != NULL, "null check is implicit");
     if (!_nextMarkBitMap->isMarked(objAddr)) {
@@ -366,7 +366,7 @@
   assert(hr != NULL, "sanity");
   // Given that we're looking for a region that contains an object
   // header it's impossible to get back a HC region.
-  assert(!hr->continuesHumongous(), "sanity");
+  assert(!hr->is_continues_humongous(), "sanity");
 
   // We cannot assert that word_size == obj->size() given that obj
   // might not be in a consistent state (another thread might be in
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -129,8 +129,7 @@
     // Note that we first perform the allocation and then we store the
     // region in _alloc_region. This is the reason why an active region
     // can never be empty.
-    _alloc_region = new_alloc_region;
-    _count += 1;
+    update_alloc_region(new_alloc_region);
     trace("region allocation successful");
     return result;
   } else {
@@ -172,6 +171,19 @@
   trace("set");
 }
 
+void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
+  trace("update");
+  // We explicitly check that the region is not empty to make sure we
+  // maintain the "the alloc region cannot be empty" invariant.
+  assert(alloc_region != NULL && !alloc_region->is_empty(),
+         ar_ext_msg(this, "pre-condition"));
+
+  _alloc_region = alloc_region;
+  _alloc_region->set_allocation_context(allocation_context());
+  _count += 1;
+  trace("updated");
+}
+
 HeapRegion* G1AllocRegion::release() {
   trace("releasing");
   HeapRegion* alloc_region = _alloc_region;
@@ -225,5 +237,70 @@
 G1AllocRegion::G1AllocRegion(const char* name,
                              bool bot_updates)
   : _name(name), _bot_updates(bot_updates),
-    _alloc_region(NULL), _count(0), _used_bytes_before(0) { }
+    _alloc_region(NULL), _count(0), _used_bytes_before(0),
+    _allocation_context(AllocationContext::system()) { }
+
+
+HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
+                                                    bool force) {
+  return _g1h->new_mutator_alloc_region(word_size, force);
+}
+
+void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
+                                       size_t allocated_bytes) {
+  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
+}
+
+HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
+                                                       bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
+}
+
+void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                          size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForSurvived);
+}
+
+HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
+                                                  bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
+}
 
+void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                     size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForTenured);
+}
+
+HeapRegion* OldGCAllocRegion::release() {
+  HeapRegion* cur = get();
+  if (cur != NULL) {
+    // Determine how far we are from the next card boundary. If it is smaller than
+    // the minimum object size we can allocate into, expand into the next card.
+    HeapWord* top = cur->top();
+    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
+
+    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
+
+    if (to_allocate_words != 0) {
+      // We are not at a card boundary. Fill up, possibly into the next, taking the
+      // end of the region and the minimum object size into account.
+      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
+                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
+
+      // Skip allocation if there is not enough space to allocate even the smallest
+      // possible object. In this case this region will not be retained, so the
+      // original problem cannot occur.
+      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
+        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
+        CollectedHeap::fill_with_object(dummy, to_allocate_words);
+      }
+    }
+  }
+  return G1AllocRegion::release();
+}
+
+
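
OldGCAllocRegion::release() above pads the retained old region up to the next card boundary with a dummy object, so remembered-set scanning and mutator allocation never update the block-offset table for the same card concurrently. A standalone sketch of just the size computation; the card size, word size, and minimum fill size here are assumed values, not the HotSpot constants.

#include <algorithm>
#include <cstddef>
#include <cstdint>

static const size_t kCardBytes    = 512;            // assumed card size
static const size_t kWordBytes    = sizeof(void*);
static const size_t kMinFillWords = 2;              // assumed minimum dummy-object size

// Returns the number of words to fill, or 0 if no fill is needed or possible.
static size_t words_to_fill(uintptr_t top, uintptr_t region_end) {
  uintptr_t aligned_top = (top + kCardBytes - 1) & ~(uintptr_t)(kCardBytes - 1);
  size_t to_allocate = (aligned_top - top) / kWordBytes;
  if (to_allocate == 0) return 0;                    // already on a card boundary
  size_t room = (region_end - top) / kWordBytes;     // words left in the region
  to_allocate = std::min(room, std::max(to_allocate, kMinFillWords));
  return (to_allocate >= kMinFillWords) ? to_allocate : 0;
}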
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -57,6 +57,9 @@
   // correct use of init() and release()).
   HeapRegion* volatile _alloc_region;
 
+  // Allocation context associated with this alloc region.
+  AllocationContext_t _allocation_context;
+
   // It keeps track of the distinct number of regions that are used
   // for allocation in the active interval of this object, i.e.,
   // between a call to init() and a call to release(). The count
@@ -110,6 +113,10 @@
   // else can allocate out of it.
   void retire(bool fill_up);
 
+  // After a region is allocated by alloc_new_region, this
+  // method is used to set it as the active alloc_region
+  void update_alloc_region(HeapRegion* alloc_region);
+
   // Allocate a new active region and use it to perform a word_size
   // allocation. The force parameter will be passed on to
   // G1CollectedHeap::allocate_new_alloc_region() and tells it to try
@@ -137,6 +144,9 @@
     return (hr == _dummy_region) ? NULL : hr;
   }
 
+  void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
+  AllocationContext_t  allocation_context() { return _allocation_context; }
+
   uint count() { return _count; }
 
   // The following two are the building blocks for the allocation method.
@@ -182,6 +192,40 @@
 #endif // G1_ALLOC_REGION_TRACING
 };
 
+class MutatorAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  MutatorAllocRegion()
+    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
+};
+
+class SurvivorGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  SurvivorGCAllocRegion()
+  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
+};
+
+class OldGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  OldGCAllocRegion()
+  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+
+  // This specialization of release() makes sure that the last card that has
+  // been allocated into has been completely filled by a dummy object.  This
+  // avoids races when remembered set scanning wants to update the BOT of the
+  // last card in the retained old gc alloc region, and allocation threads
+  // allocating into that card at the same time.
+  virtual HeapRegion* release();
+};
+
 class ar_ext_msg : public err_msg {
 public:
   ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("%s", "") {
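
The header above turns G1AllocRegion into a small template-method hierarchy: the base class owns the current region, the allocation context, and the region count, while MutatorAllocRegion, SurvivorGCAllocRegion, and OldGCAllocRegion only decide how regions are obtained and retired. A heavily simplified sketch of that class shape; HeapRegion and the heap are reduced to placeholders.

#include <cstddef>

struct HeapRegionStub { size_t used_bytes; };
typedef unsigned char AllocationContext_t;

class AllocRegionBase {
protected:
  HeapRegionStub*     _alloc_region;
  AllocationContext_t _allocation_context;
  unsigned            _count;

  // Subclass hooks, as in G1AllocRegion.
  virtual HeapRegionStub* allocate_new_region(size_t word_size, bool force) = 0;
  virtual void retire_region(HeapRegionStub* region, size_t allocated_bytes) = 0;

  // Mirrors update_alloc_region(): install the new region and bump the
  // per-interval region count (the real code also tags the region with
  // this object's allocation context).
  void update_alloc_region(HeapRegionStub* r) {
    _alloc_region = r;
    _count += 1;
  }
public:
  AllocRegionBase() : _alloc_region(nullptr), _allocation_context(0), _count(0) {}
  virtual ~AllocRegionBase() {}
  void set_allocation_context(AllocationContext_t c) { _allocation_context = c; }
  AllocationContext_t allocation_context() const     { return _allocation_context; }
};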
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
+
+#include "memory/allocation.hpp"
+
+typedef unsigned char AllocationContext_t;
+
+class AllocationContext : AllStatic {
+public:
+  // Currently used context
+  static AllocationContext_t current() {
+    return 0;
+  }
+  // System wide default context
+  static AllocationContext_t system() {
+    return 0;
+  }
+};
+
+class AllocationContextStats: public StackObj {
+public:
+  inline void clear() { }
+  inline void update(bool full_gc) { }
+  inline void update_at_remark() { }
+  inline bool available() { return false; }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
+
+void G1DefaultAllocator::init_mutator_alloc_region() {
+  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
+  _mutator_alloc_region.init();
+}
+
+void G1DefaultAllocator::release_mutator_alloc_region() {
+  _mutator_alloc_region.release();
+  assert(_mutator_alloc_region.get() == NULL, "post-condition");
+}
+
+void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
+                                            OldGCAllocRegion* old,
+                                            HeapRegion** retained_old) {
+  HeapRegion* retained_region = *retained_old;
+  *retained_old = NULL;
+
+  // We will discard the current GC alloc region if:
+  // a) it's in the collection set (it can happen!),
+  // b) it's already full (no point in using it),
+  // c) it's empty (this means that it was emptied during
+  // a cleanup and it should be on the free list now), or
+  // d) it's humongous (this means that it was emptied
+  // during a cleanup and was added to the free list, but
+  // has been subsequently used to allocate a humongous
+  // object that may be less than the region size).
+  if (retained_region != NULL &&
+      !retained_region->in_collection_set() &&
+      !(retained_region->top() == retained_region->end()) &&
+      !retained_region->is_empty() &&
+      !retained_region->is_humongous()) {
+    retained_region->record_top_and_timestamp();
+    // The retained region was added to the old region set when it was
+    // retired. We have to remove it now, since we don't allow regions
+    // we allocate to in the region sets. We'll re-add it later, when
+    // it's retired again.
+    _g1h->_old_set.remove(retained_region);
+    bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
+    retained_region->note_start_of_copying(during_im);
+    old->set(retained_region);
+    _g1h->_hr_printer.reuse(retained_region);
+    evacuation_info.set_alloc_regions_used_before(retained_region->used());
+  }
+}
+
+void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  _survivor_gc_alloc_region.init();
+  _old_gc_alloc_region.init();
+  reuse_retained_old_region(evacuation_info,
+                            &_old_gc_alloc_region,
+                            &_retained_old_gc_alloc_region);
+}
+
+void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
+  AllocationContext_t context = AllocationContext::current();
+  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
+                                         old_gc_alloc_region(context)->count());
+  survivor_gc_alloc_region(context)->release();
+  // If we have an old GC alloc region to release, we'll save it in
+  // _retained_old_gc_alloc_region. If we don't
+  // _retained_old_gc_alloc_region will become NULL. This is what we
+  // want either way so no reason to check explicitly for either
+  // condition.
+  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
+
+  if (ResizePLAB) {
+    _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+    _g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+  }
+}
+
+void G1DefaultAllocator::abandon_gc_alloc_regions() {
+  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+  _retained_old_gc_alloc_region = NULL;
+}
+
+G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
+  ParGCAllocBuffer(gclab_word_size), _retired(true) { }
+
+HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
+  HeapWord* obj = NULL;
+  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
+    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
+    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+    alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+
+    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
+    if (buf == NULL) {
+      return NULL; // Let caller handle allocation failure.
+    }
+    // Otherwise.
+    alloc_buf->set_word_size(gclab_word_size);
+    alloc_buf->set_buf(buf);
+
+    obj = alloc_buf->allocate(word_sz);
+    assert(obj != NULL, "buffer was definitely big enough...");
+  } else {
+    obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
+  }
+  return obj;
+}
+
+G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
+            G1ParGCAllocator(g1h),
+            _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
+            _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {
+
+  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
+  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
+
+}
+
+void G1DefaultParGCAllocator::retire_alloc_buffers() {
+  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    size_t waste = _alloc_buffers[ap]->words_remaining();
+    add_to_alloc_buffer_waste(waste);
+    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
+                                               true /* end_of_gc */,
+                                               false /* retain */);
+  }
+}
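
G1ParGCAllocator::allocate_slow() above decides between refilling the per-thread PLAB and allocating directly in the heap: a request that is small relative to the desired PLAB size is worth retiring the current buffer for, while a large request bypasses the buffer so it does not waste most of a fresh one. A sketch of only that decision, with assumed constants in place of desired_plab_sz() and ParallelGCBufferWastePct:

#include <cstddef>

static const size_t kPlabWords    = 4096;  // assumed desired PLAB size in words
static const size_t kWastePercent = 10;    // assumed ParallelGCBufferWastePct

enum SlowPathChoice { RefillBufferThenAllocate, AllocateDirectly };

static SlowPathChoice choose_slow_path(size_t word_sz) {
  // Same shape as: word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct
  if (word_sz * 100 < kPlabWords * kWastePercent) {
    return RefillBufferThenAllocate;   // retire the current PLAB, request a new one
  }
  return AllocateDirectly;             // allocate this object outside any PLAB
}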
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
+
+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+
+enum GCAllocPurpose {
+  GCAllocForTenured,
+  GCAllocForSurvived,
+  GCAllocPurposeCount
+};
+
+// Base class for G1 allocators.
+class G1Allocator : public CHeapObj<mtGC> {
+  friend class VMStructs;
+protected:
+  G1CollectedHeap* _g1h;
+
+  // Outside of GC pauses, the number of bytes used in all regions other
+  // than the current allocation region.
+  size_t _summary_bytes_used;
+
+public:
+   G1Allocator(G1CollectedHeap* heap) :
+     _g1h(heap), _summary_bytes_used(0) { }
+
+   static G1Allocator* create_allocator(G1CollectedHeap* g1h);
+
+   virtual void init_mutator_alloc_region() = 0;
+   virtual void release_mutator_alloc_region() = 0;
+
+   virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
+   virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
+   virtual void abandon_gc_alloc_regions() = 0;
+
+   virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
+   virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
+   virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;
+   virtual size_t                 used() = 0;
+   virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;
+
+   void                           reuse_retained_old_region(EvacuationInfo& evacuation_info,
+                                                            OldGCAllocRegion* old,
+                                                            HeapRegion** retained);
+
+   size_t used_unlocked() const {
+     return _summary_bytes_used;
+   }
+
+   void increase_used(size_t bytes) {
+     _summary_bytes_used += bytes;
+   }
+
+   void decrease_used(size_t bytes) {
+     assert(_summary_bytes_used >= bytes,
+            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
+                _summary_bytes_used, bytes));
+     _summary_bytes_used -= bytes;
+   }
+
+   void set_used(size_t bytes) {
+     _summary_bytes_used = bytes;
+   }
+
+   virtual HeapRegion* new_heap_region(uint hrs_index,
+                                       G1BlockOffsetSharedArray* sharedOffsetArray,
+                                       MemRegion mr) {
+     return new HeapRegion(hrs_index, sharedOffsetArray, mr);
+   }
+};
+
+// The default allocator for G1.
+class G1DefaultAllocator : public G1Allocator {
+protected:
+  // Alloc region used to satisfy mutator allocation requests.
+  MutatorAllocRegion _mutator_alloc_region;
+
+  // Alloc region used to satisfy allocation requests by the GC for
+  // survivor objects.
+  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+
+  // Alloc region used to satisfy allocation requests by the GC for
+  // old objects.
+  OldGCAllocRegion _old_gc_alloc_region;
+
+  HeapRegion* _retained_old_gc_alloc_region;
+public:
+  G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }
+
+  virtual void init_mutator_alloc_region();
+  virtual void release_mutator_alloc_region();
+
+  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
+  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
+  virtual void abandon_gc_alloc_regions();
+
+  virtual bool is_retained_old_region(HeapRegion* hr) {
+    return _retained_old_gc_alloc_region == hr;
+  }
+
+  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
+    return &_mutator_alloc_region;
+  }
+
+  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
+    return &_survivor_gc_alloc_region;
+  }
+
+  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
+    return &_old_gc_alloc_region;
+  }
+
+  virtual size_t used() {
+    assert(Heap_lock->owner() != NULL,
+           "Should be owned on this thread's behalf.");
+    size_t result = _summary_bytes_used;
+
+    // Read only once in case it is set to NULL concurrently
+    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
+    if (hr != NULL) {
+      result += hr->used();
+    }
+    return result;
+  }
+};
+
+class G1ParGCAllocBuffer: public ParGCAllocBuffer {
+private:
+  bool _retired;
+
+public:
+  G1ParGCAllocBuffer(size_t gclab_word_size);
+  virtual ~G1ParGCAllocBuffer() {
+    guarantee(_retired, "Allocation buffer has not been retired");
+  }
+
+  virtual void set_buf(HeapWord* buf) {
+    ParGCAllocBuffer::set_buf(buf);
+    _retired = false;
+  }
+
+  virtual void retire(bool end_of_gc, bool retain) {
+    if (_retired) {
+      return;
+    }
+    ParGCAllocBuffer::retire(end_of_gc, retain);
+    _retired = true;
+  }
+};
+
+class G1ParGCAllocator : public CHeapObj<mtGC> {
+  friend class G1ParScanThreadState;
+protected:
+  G1CollectedHeap* _g1h;
+
+  size_t _alloc_buffer_waste;
+  size_t _undo_waste;
+
+  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
+  void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
+
+  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);
+
+  virtual void retire_alloc_buffers() = 0;
+  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;
+
+public:
+  G1ParGCAllocator(G1CollectedHeap* g1h) :
+    _g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
+  }
+
+  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
+
+  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
+  size_t undo_waste() {return _undo_waste; }
+
+  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
+    HeapWord* obj = NULL;
+    if (purpose == GCAllocForSurvived) {
+      obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
+    } else {
+      obj = alloc_buffer(purpose, context)->allocate(word_sz);
+    }
+    if (obj != NULL) {
+      return obj;
+    }
+    return allocate_slow(purpose, word_sz, context);
+  }
+
+  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
+    if (alloc_buffer(purpose, context)->contains(obj)) {
+      assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
+             "should contain whole object");
+      alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
+    } else {
+      CollectedHeap::fill_with_object(obj, word_sz);
+      add_to_undo_waste(word_sz);
+    }
+  }
+};
+
+class G1DefaultParGCAllocator : public G1ParGCAllocator {
+  G1ParGCAllocBuffer  _surviving_alloc_buffer;
+  G1ParGCAllocBuffer  _tenured_alloc_buffer;
+  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+
+public:
+  G1DefaultParGCAllocator(G1CollectedHeap* g1h);
+
+  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
+    return _alloc_buffers[purpose];
+  }
+
+  virtual void retire_alloc_buffers() ;
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
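
undo_allocation() in the header above can hand space back only if the object still sits in the current buffer; otherwise the already-claimed words are plugged with a dummy object and counted as undo waste so the heap remains walkable. The following sketch reduces the buffer to a bump pointer and stubs out the filler write; it is an illustration of the shape, not the ParGCAllocBuffer API.

#include <cstddef>

struct PlabSketch {
  size_t* bottom;
  size_t* top;
  bool contains(size_t* p) const { return p >= bottom && p < top; }
  void undo(size_t* p, size_t words) { if (p + words == top) top = p; }
};

static size_t g_undo_waste_words = 0;

// Placeholder for CollectedHeap::fill_with_object(): write a filler object so
// heap iteration can step over the unused words.
static void fill_with_dummy(size_t* p, size_t words) { (void)p; (void)words; }

static void undo_allocation(PlabSketch& buf, size_t* obj, size_t word_sz) {
  if (buf.contains(obj)) {
    buf.undo(obj, word_sz);
  } else {
    fill_with_dummy(obj, word_sz);
    g_undo_waste_words += word_sz;
  }
}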
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator_ext.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+
+G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
+  return new G1DefaultAllocator(g1h);
+}
+
+G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) {
+  return new G1DefaultParGCAllocator(g1h);
+}
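
The new _ext.cpp above is an extension hook: the heap obtains its allocators through static factory functions, and a different build flavor can supply an alternative translation unit that returns other subclasses. A minimal sketch of that factory pattern; the class names below are placeholders for G1Allocator and G1DefaultAllocator.

class AllocatorBase {
public:
  virtual ~AllocatorBase() {}
  static AllocatorBase* create_allocator();   // defined once per build flavor
};

class DefaultAllocator : public AllocatorBase {};

// Default build flavor: hand back the stock allocator.
AllocatorBase* AllocatorBase::create_allocator() {
  return new DefaultAllocator();
}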
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -469,7 +469,7 @@
 // can move in an incremental collection.
 bool G1CollectedHeap::is_scavengable(const void* p) {
   HeapRegion* hr = heap_region_containing(p);
-  return !hr->isHumongous();
+  return !hr->is_humongous();
 }
 
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
@@ -560,7 +560,7 @@
 }
 
 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
-  assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
+  assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
          "the only time we use this to allocate a humongous region is "
          "when we are allocating a single humongous region");
 
@@ -615,9 +615,10 @@
 HeapWord*
 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                            uint num_regions,
-                                                           size_t word_size) {
+                                                           size_t word_size,
+                                                           AllocationContext_t context) {
   assert(first != G1_NO_HRM_INDEX, "pre-condition");
-  assert(isHumongous(word_size), "word_size should be humongous");
+  assert(is_humongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
   // Index of last region in the series + 1.
@@ -666,14 +667,15 @@
   // will also update the BOT covering all the regions to reflect
   // that there is a single object that starts at the bottom of the
   // first region.
-  first_hr->set_startsHumongous(new_top, new_end);
-
+  first_hr->set_starts_humongous(new_top, new_end);
+  first_hr->set_allocation_context(context);
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
   for (uint i = first + 1; i < last; ++i) {
     hr = region_at(i);
-    hr->set_continuesHumongous(first_hr);
+    hr->set_continues_humongous(first_hr);
+    hr->set_allocation_context(context);
   }
   // If we have "continues humongous" regions (hr != NULL), then the
   // end of the last one should match new_end.
@@ -711,7 +713,7 @@
   // G1. For example, the code that looks for a consecutive number
   // of empty regions will consider them empty and try to
   // re-allocate them. We can extend is_empty() to also include
-  // !continuesHumongous(), but it is easier to just update the top
+  // !is_continues_humongous(), but it is easier to just update the top
   // fields here. The way we set top for all regions (i.e., top ==
   // end for all regions but the last one, top == new_top for the
   // last one) is actually used when we will free up the humongous
@@ -740,7 +742,7 @@
   check_bitmaps("Humongous Region Allocation", first_hr);
 
   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
-  _summary_bytes_used += first_hr->used();
+  _allocator->increase_used(first_hr->used());
   _humongous_set.add(first_hr);
 
   return new_obj;
@@ -749,7 +751,7 @@
 // If could fit into free regions w/o expansion, try.
 // Otherwise, if can expand, do so.
 // Otherwise, if using ex regions might help, try with ex given back.
-HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
+HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   verify_region_sets_optional();
@@ -818,7 +820,8 @@
 
   HeapWord* result = NULL;
   if (first != G1_NO_HRM_INDEX) {
-    result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
+    result = humongous_obj_allocate_initialize_regions(first, obj_regions,
+                                                       word_size, context);
     assert(result != NULL, "it should always return a valid result");
 
     // A successful humongous object allocation changes the used space
@@ -834,7 +837,7 @@
 
 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   assert_heap_not_locked_and_not_at_safepoint();
-  assert(!isHumongous(word_size), "we do not allow humongous TLABs");
+  assert(!is_humongous(word_size), "we do not allow humongous TLABs");
 
   unsigned int dummy_gc_count_before;
   int dummy_gclocker_retry_count = 0;
@@ -851,7 +854,7 @@
     unsigned int gc_count_before;
 
     HeapWord* result = NULL;
-    if (!isHumongous(word_size)) {
+    if (!is_humongous(word_size)) {
       result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
     } else {
       result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
@@ -862,6 +865,8 @@
 
     // Create the garbage collection operation...
     VM_G1CollectForAllocation op(gc_count_before, word_size);
+    op.set_allocation_context(AllocationContext::current());
+
     // ...and get the VM thread to execute it.
     VMThread::execute(&op);
 
@@ -870,7 +875,7 @@
       // if it is NULL. If the allocation attempt failed immediately
       // after a Full GC, it's unlikely we'll be able to allocate now.
       HeapWord* result = op.result();
-      if (result != NULL && !isHumongous(word_size)) {
+      if (result != NULL && !is_humongous(word_size)) {
         // Allocations that take place on VM operations do not do any
         // card dirtying and we have to do it here. We only have to do
         // this for non-humongous allocations, though.
@@ -897,12 +902,13 @@
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
-                                           unsigned int *gc_count_before_ret,
-                                           int* gclocker_retry_count_ret) {
+                                                   AllocationContext_t context,
+                                                   unsigned int *gc_count_before_ret,
+                                                   int* gclocker_retry_count_ret) {
   // Make sure you read the note in attempt_allocation_humongous().
 
   assert_heap_not_locked_and_not_at_safepoint();
-  assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
+  assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
          "be called for humongous allocation requests");
 
   // We should only get here after the first-level allocation attempt
@@ -919,23 +925,22 @@
 
     {
       MutexLockerEx x(Heap_lock);
-
-      result = _mutator_alloc_region.attempt_allocation_locked(word_size,
-                                                      false /* bot_updates */);
+      result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                    false /* bot_updates */);
       if (result != NULL) {
         return result;
       }
 
       // If we reach here, attempt_allocation_locked() above failed to
       // allocate a new region. So the mutator alloc region should be NULL.
-      assert(_mutator_alloc_region.get() == NULL, "only way to get here");
+      assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
 
       if (GC_locker::is_active_and_needs_gc()) {
         if (g1_policy()->can_expand_young_list()) {
           // No need for an ergo verbose message here,
           // can_expand_young_list() does this when it returns true.
-          result = _mutator_alloc_region.attempt_allocation_force(word_size,
-                                                      false /* bot_updates */);
+          result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
+                                                                                       false /* bot_updates */);
           if (result != NULL) {
             return result;
           }
@@ -995,8 +1000,8 @@
     // first attempt (without holding the Heap_lock) here and the
     // follow-on attempt will be at the start of the next loop
     // iteration (after taking the Heap_lock).
-    result = _mutator_alloc_region.attempt_allocation(word_size,
-                                                      false /* bot_updates */);
+    result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+                                                                           false /* bot_updates */);
     if (result != NULL) {
       return result;
     }
@@ -1014,8 +1019,8 @@
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
-                                          unsigned int * gc_count_before_ret,
-                                          int* gclocker_retry_count_ret) {
+                                                        unsigned int * gc_count_before_ret,
+                                                        int* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
   // into a single one is that such a method would require several "if
@@ -1028,7 +1033,7 @@
   // much as possible.
 
   assert_heap_not_locked_and_not_at_safepoint();
-  assert(isHumongous(word_size), "attempt_allocation_humongous() "
+  assert(is_humongous(word_size), "attempt_allocation_humongous() "
          "should only be called for humongous allocations");
 
   // Humongous objects can exhaust the heap quickly, so we should check if we
@@ -1056,7 +1061,7 @@
       // Given that humongous objects are not allocated in young
       // regions, we'll first try to do the allocation without doing a
       // collection hoping that there's enough space in the heap.
-      result = humongous_obj_allocate(word_size);
+      result = humongous_obj_allocate(word_size, AllocationContext::current());
       if (result != NULL) {
         return result;
       }
@@ -1132,17 +1137,18 @@
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
-                                       bool expect_null_mutator_alloc_region) {
+                                                           AllocationContext_t context,
+                                                           bool expect_null_mutator_alloc_region) {
   assert_at_safepoint(true /* should_be_vm_thread */);
-  assert(_mutator_alloc_region.get() == NULL ||
+  assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
                                              !expect_null_mutator_alloc_region,
          "the current alloc region was unexpectedly found to be non-NULL");
 
-  if (!isHumongous(word_size)) {
-    return _mutator_alloc_region.attempt_allocation_locked(word_size,
+  if (!is_humongous(word_size)) {
+    return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                       false /* bot_updates */);
   } else {
-    HeapWord* result = humongous_obj_allocate(word_size);
+    HeapWord* result = humongous_obj_allocate(word_size, context);
     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
       g1_policy()->set_initiate_conc_mark_if_possible();
     }
@@ -1162,7 +1168,7 @@
   bool doHeapRegion(HeapRegion* r) {
     HeapRegionRemSet* hrrs = r->rem_set();
 
-    if (r->continuesHumongous()) {
+    if (r->is_continues_humongous()) {
       // We'll assert that the strong code root list and RSet is empty
       assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
       assert(hrrs->occupied() == 0, "RSet should be empty");
@@ -1199,7 +1205,7 @@
   { }
 
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->continuesHumongous()) {
+    if (!r->is_continues_humongous()) {
       _cl.set_from(r);
       r->oop_iterate(&_cl);
     }
@@ -1231,14 +1237,14 @@
     assert(!hr->is_young(), "not expecting to find young regions");
     if (hr->is_free()) {
       // We only generate output for non-empty regions.
-    } else if (hr->startsHumongous()) {
+    } else if (hr->is_starts_humongous()) {
       if (hr->region_num() == 1) {
         // single humongous region
         _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
       } else {
         _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
       }
-    } else if (hr->continuesHumongous()) {
+    } else if (hr->is_continues_humongous()) {
       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
     } else if (hr->is_old()) {
       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
@@ -1342,8 +1348,8 @@
       concurrent_mark()->abort();
 
       // Make sure we'll choose a new allocation region afterwards.
-      release_mutator_alloc_region();
-      abandon_gc_alloc_regions();
+      _allocator->release_mutator_alloc_region();
+      _allocator->abandon_gc_alloc_regions();
       g1_rem_set()->cleanupHRRS();
 
       // We should call this after we retire any currently active alloc
@@ -1515,7 +1521,7 @@
 
       clear_cset_fast_test();
 
-      init_mutator_alloc_region();
+      _allocator->init_mutator_alloc_region();
 
       double end = os::elapsedTime();
       g1_policy()->record_full_collection_end();
@@ -1651,6 +1657,7 @@
 
 HeapWord*
 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
+                                           AllocationContext_t context,
                                            bool* succeeded) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
@@ -1658,7 +1665,8 @@
   // Let's attempt the allocation first.
   HeapWord* result =
     attempt_allocation_at_safepoint(word_size,
-                                 false /* expect_null_mutator_alloc_region */);
+                                    context,
+                                    false /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
@@ -1668,7 +1676,7 @@
   // incremental pauses.  Therefore, at least for now, we'll favor
   // expansion over collection.  (This might change in the future if we can
   // do something smarter than full collection to satisfy a failed alloc.)
-  result = expand_and_allocate(word_size);
+  result = expand_and_allocate(word_size, context);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
@@ -1685,7 +1693,8 @@
 
   // Retry the allocation
   result = attempt_allocation_at_safepoint(word_size,
-                                  true /* expect_null_mutator_alloc_region */);
+                                           context,
+                                           true /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
@@ -1702,7 +1711,8 @@
 
   // Retry the allocation once more
   result = attempt_allocation_at_safepoint(word_size,
-                                  true /* expect_null_mutator_alloc_region */);
+                                           context,
+                                           true /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
@@ -1724,7 +1734,7 @@
 // successful, perform the allocation and return the address of the
 // allocated block, or else "NULL".
 
-HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
+HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
   verify_region_sets_optional();
@@ -1739,7 +1749,8 @@
     _hrm.verify_optional();
     verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
-                                 false /* expect_null_mutator_alloc_region */);
+                                           context,
+                                           false /* expect_null_mutator_alloc_region */);
   }
   return NULL;
 }
@@ -1816,7 +1827,7 @@
   // We should only reach here at the end of a Full GC which means we
   // should not be holding on to any GC alloc regions. The method
   // below will make sure of that and do any remaining clean up.
-  abandon_gc_alloc_regions();
+  _allocator->abandon_gc_alloc_regions();
 
   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to
@@ -1849,7 +1860,7 @@
   _bot_shared(NULL),
   _evac_failure_scan_stack(NULL),
   _mark_in_progress(false),
-  _cg1r(NULL), _summary_bytes_used(0),
+  _cg1r(NULL),
   _g1mm(NULL),
   _refine_cte_cl(NULL),
   _full_collection(false),
@@ -1861,7 +1872,6 @@
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
-  _retained_old_gc_alloc_region(NULL),
   _survivor_plab_stats(YoungPLABSize, PLABWeight),
   _old_plab_stats(OldPLABSize, PLABWeight),
   _expand_heap_after_alloc_failure(true),
@@ -1884,6 +1894,7 @@
     vm_exit_during_initialization("Failed necessary allocation.");
   }
 
+  _allocator = G1Allocator::create_allocator(_g1h);
   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
 
   int n_queues = MAX2((int)ParallelGCThreads, 1);
@@ -1960,15 +1971,10 @@
   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                  heap_alignment);
 
-  // It is important to do this in a way such that concurrent readers can't
-  // temporarily think something is in the heap.  (I've actually seen this
-  // happen in asserts: DLD.)
-  _reserved.set_word_size(0);
-  _reserved.set_start((HeapWord*)heap_rs.base());
-  _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   // Create the gen rem set (and barrier set) for the entire reserved region.
-  _rem_set = collector_policy()->create_rem_set(_reserved, 2);
+  _rem_set = collector_policy()->create_rem_set(reserved_region(), 2);
   set_barrier_set(rem_set()->bs());
   if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
     vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
@@ -2052,7 +2058,7 @@
 
   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
 
-  _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
+  _bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
 
   _g1h = this;
 
@@ -2127,7 +2133,7 @@
   dummy_region->set_top(dummy_region->end());
   G1AllocRegion::setup(this, dummy_region);
 
-  init_mutator_alloc_region();
+  _allocator->init_mutator_alloc_region();
 
   // Do create of the monitoring and management support so that
   // values in the heap have been properly initialized.
@@ -2237,14 +2243,14 @@
 }
 
 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
-  assert(!hr->continuesHumongous(), "pre-condition");
+  assert(!hr->is_continues_humongous(), "pre-condition");
   hr->reset_gc_time_stamp();
-  if (hr->startsHumongous()) {
+  if (hr->is_starts_humongous()) {
     uint first_index = hr->hrm_index() + 1;
     uint last_index = hr->last_hc_index();
     for (uint i = first_index; i < last_index; i += 1) {
       HeapRegion* chr = region_at(i);
-      assert(chr->continuesHumongous(), "sanity");
+      assert(chr->is_continues_humongous(), "sanity");
       chr->reset_gc_time_stamp();
     }
   }
@@ -2301,21 +2307,12 @@
 
 
 // Computes the sum of the storage used by the various regions.
-
 size_t G1CollectedHeap::used() const {
-  assert(Heap_lock->owner() != NULL,
-         "Should be owned on this thread's behalf.");
-  size_t result = _summary_bytes_used;
-  // Read only once in case it is set to NULL concurrently
-  HeapRegion* hr = _mutator_alloc_region.get();
-  if (hr != NULL)
-    result += hr->used();
-  return result;
+  return _allocator->used();
 }
 
 size_t G1CollectedHeap::used_unlocked() const {
-  size_t result = _summary_bytes_used;
-  return result;
+  return _allocator->used_unlocked();
 }
 
 class SumUsedClosure: public HeapRegionClosure {
@@ -2323,7 +2320,7 @@
 public:
   SumUsedClosure() : _used(0) {}
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->continuesHumongous()) {
+    if (!r->is_continues_humongous()) {
       _used += r->used();
     }
     return false;
@@ -2355,11 +2352,12 @@
   // Let's fill up most of the region
   size_t word_size = HeapRegion::GrainWords - 1024;
   // And as a result the region we'll allocate will be humongous.
-  guarantee(isHumongous(word_size), "sanity");
+  guarantee(is_humongous(word_size), "sanity");
 
   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
     // Let's use the existing mechanism for the allocation
-    HeapWord* dummy_obj = humongous_obj_allocate(word_size);
+    HeapWord* dummy_obj = humongous_obj_allocate(word_size,
+                                                 AllocationContext::system());
     if (dummy_obj != NULL) {
       MemRegion mr(dummy_obj, word_size);
       CollectedHeap::fill_with_object(mr);
@@ -2510,6 +2508,7 @@
                                  true,  /* should_initiate_conc_mark */
                                  g1_policy()->max_pause_time_ms(),
                                  cause);
+      op.set_allocation_context(AllocationContext::current());
 
       VMThread::execute(&op);
       if (!op.pause_succeeded()) {
@@ -2581,7 +2580,7 @@
 public:
   IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->continuesHumongous()) {
+    if (!r->is_continues_humongous()) {
       r->oop_iterate(_cl);
     }
     return false;
@@ -2600,7 +2599,7 @@
 public:
   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
   bool doHeapRegion(HeapRegion* r) {
-    if (! r->continuesHumongous()) {
+    if (!r->is_continues_humongous()) {
       r->object_iterate(_cl);
     }
     return false;
@@ -2682,11 +2681,11 @@
                              r->claim_value(), _claim_value);
       ++_failures;
     }
-    if (!r->isHumongous()) {
+    if (!r->is_humongous()) {
       _sh_region = NULL;
-    } else if (r->startsHumongous()) {
+    } else if (r->is_starts_humongous()) {
       _sh_region = r;
-    } else if (r->continuesHumongous()) {
+    } else if (r->is_continues_humongous()) {
       if (r->humongous_start_region() != _sh_region) {
         gclog_or_tty->print_cr("Region " HR_FORMAT ", "
                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
@@ -2720,7 +2719,7 @@
 
   bool doHeapRegion(HeapRegion* hr) {
     assert(hr->in_collection_set(), "how?");
-    assert(!hr->isHumongous(), "H-region in CSet");
+    assert(!hr->is_humongous(), "H-region in CSet");
     if (hr->claim_value() != _claim_value) {
       gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
                              "claim value = %d, should be %d",
@@ -2859,7 +2858,7 @@
 
 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
   HeapRegion* result = _hrm.next_region_in_heap(from);
-  while (result != NULL && result->isHumongous()) {
+  while (result != NULL && result->is_humongous()) {
     result = _hrm.next_region_in_heap(result);
   }
   return result;
@@ -2910,7 +2909,7 @@
   // since we can't allow tlabs to grow big enough to accommodate
   // humongous objects.
 
-  HeapRegion* hr = _mutator_alloc_region.get();
+  HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
   size_t max_tlab = max_tlab_size() * wordSize;
   if (hr == NULL) {
     return max_tlab;
@@ -3219,7 +3218,7 @@
   }
 
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->continuesHumongous()) {
+    if (!r->is_continues_humongous()) {
       bool failures = false;
       r->verify(_vo, &failures);
       if (failures) {
@@ -3597,7 +3596,7 @@
   }
 }
 
-void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
+void G1CollectedHeap::gc_epilogue(bool full) {
 
   if (G1SummarizeRSetStats &&
       (G1SummarizeRSetStatsPeriod > 0) &&
@@ -3614,6 +3613,7 @@
   // always_do_update_barrier = true;
 
   resize_all_tlabs();
+  allocation_context_stats().update(full);
 
   // We have just completed a GC. Update the soft reference
   // policy with the new heap occupancy
@@ -3631,6 +3631,8 @@
                              false, /* should_initiate_conc_mark */
                              g1_policy()->max_pause_time_ms(),
                              gc_cause);
+
+  op.set_allocation_context(AllocationContext::current());
   VMThread::execute(&op);
 
   HeapWord* result = op.result();
@@ -3676,7 +3678,7 @@
 
 bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
   HeapRegion* region = region_at(index);
-  assert(region->startsHumongous(), "Must start a humongous object");
+  assert(region->is_starts_humongous(), "Must start a humongous object");
   return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
 }
 
@@ -3689,7 +3691,7 @@
   }
 
   virtual bool doHeapRegion(HeapRegion* r) {
-    if (!r->startsHumongous()) {
+    if (!r->is_starts_humongous()) {
       return false;
     }
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -3961,7 +3963,7 @@
 
         // Forget the current alloc region (we might even choose it to be part
         // of the collection set!).
-        release_mutator_alloc_region();
+        _allocator->release_mutator_alloc_region();
 
         // We should call this after we retire the mutator alloc
         // region(s) so that all the ALLOC / RETIRE events are generated
@@ -4044,7 +4046,7 @@
         setup_surviving_young_words();
 
         // Initialize the GC alloc regions.
-        init_gc_alloc_regions(evacuation_info);
+        _allocator->init_gc_alloc_regions(evacuation_info);
 
         // Actually do the work...
         evacuate_collection_set(evacuation_info);
@@ -4093,7 +4095,7 @@
         _young_list->reset_auxilary_lists();
 
         if (evacuation_failed()) {
-          _summary_bytes_used = recalculate_used();
+          _allocator->set_used(recalculate_used());
           uint n_queues = MAX2((int)ParallelGCThreads, 1);
           for (uint i = 0; i < n_queues; i++) {
             if (_evacuation_failed_info_array[i].has_failed()) {
@@ -4103,7 +4105,7 @@
         } else {
           // The "used" of the collection set regions has already been subtracted
           // when they were freed.  Add in the bytes evacuated.
-          _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
+          _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
         }
 
         if (g1_policy()->during_initial_mark_pause()) {
@@ -4125,7 +4127,7 @@
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        init_mutator_alloc_region();
+        _allocator->init_mutator_alloc_region();
 
         {
           size_t expand_bytes = g1_policy()->expansion_amount();
@@ -4270,80 +4272,6 @@
   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
 }
 
-void G1CollectedHeap::init_mutator_alloc_region() {
-  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
-  _mutator_alloc_region.init();
-}
-
-void G1CollectedHeap::release_mutator_alloc_region() {
-  _mutator_alloc_region.release();
-  assert(_mutator_alloc_region.get() == NULL, "post-condition");
-}
-
-void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
-  HeapRegion* retained_region = _retained_old_gc_alloc_region;
-  _retained_old_gc_alloc_region = NULL;
-
-  // We will discard the current GC alloc region if:
-  // a) it's in the collection set (it can happen!),
-  // b) it's already full (no point in using it),
-  // c) it's empty (this means that it was emptied during
-  // a cleanup and it should be on the free list now), or
-  // d) it's humongous (this means that it was emptied
-  // during a cleanup and was added to the free list, but
-  // has been subsequently used to allocate a humongous
-  // object that may be less than the region size).
-  if (retained_region != NULL &&
-      !retained_region->in_collection_set() &&
-      !(retained_region->top() == retained_region->end()) &&
-      !retained_region->is_empty() &&
-      !retained_region->isHumongous()) {
-    retained_region->record_top_and_timestamp();
-    // The retained region was added to the old region set when it was
-    // retired. We have to remove it now, since we don't allow regions
-    // we allocate to in the region sets. We'll re-add it later, when
-    // it's retired again.
-    _old_set.remove(retained_region);
-    bool during_im = g1_policy()->during_initial_mark_pause();
-    retained_region->note_start_of_copying(during_im);
-    _old_gc_alloc_region.set(retained_region);
-    _hr_printer.reuse(retained_region);
-    evacuation_info.set_alloc_regions_used_before(retained_region->used());
-  }
-}
-
-void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
-
-  _survivor_gc_alloc_region.init();
-  _old_gc_alloc_region.init();
-
-  use_retained_old_gc_alloc_region(evacuation_info);
-}
-
-void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
-  evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
-                                         _old_gc_alloc_region.count());
-  _survivor_gc_alloc_region.release();
-  // If we have an old GC alloc region to release, we'll save it in
-  // _retained_old_gc_alloc_region. If we don't
-  // _retained_old_gc_alloc_region will become NULL. This is what we
-  // want either way so no reason to check explicitly for either
-  // condition.
-  _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
-
-  if (ResizePLAB) {
-    _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
-    _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
-  }
-}
-
-void G1CollectedHeap::abandon_gc_alloc_regions() {
-  assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
-  assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
-  _retained_old_gc_alloc_region = NULL;
-}
-
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
@@ -4484,25 +4412,26 @@
 }
 
 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
-                                                  size_t word_size) {
+                                                  size_t word_size,
+                                                  AllocationContext_t context) {
   if (purpose == GCAllocForSurvived) {
-    HeapWord* result = survivor_attempt_allocation(word_size);
+    HeapWord* result = survivor_attempt_allocation(word_size, context);
     if (result != NULL) {
       return result;
     } else {
       // Let's try to allocate in the old gen in case we can fit the
       // object there.
-      return old_attempt_allocation(word_size);
+      return old_attempt_allocation(word_size, context);
     }
   } else {
     assert(purpose ==  GCAllocForTenured, "sanity");
-    HeapWord* result = old_attempt_allocation(word_size);
+    HeapWord* result = old_attempt_allocation(word_size, context);
     if (result != NULL) {
       return result;
     } else {
       // Let's try to allocate in the survivors in case we can fit the
       // object there.
-      return survivor_attempt_allocation(word_size);
+      return survivor_attempt_allocation(word_size, context);
     }
   }
 
@@ -4511,9 +4440,6 @@
   return NULL;
 }
 
-G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
-  ParGCAllocBuffer(gclab_word_size), _retired(true) { }
-
 void G1ParCopyHelper::mark_object(oop obj) {
   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
@@ -5087,7 +5013,11 @@
       _num_entered_barrier(0)
   {
     nmethod::increase_unloading_clock();
-    _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
+    // Get first alive nmethod
+    NMethodIterator iter = NMethodIterator();
+    if (iter.next_alive()) {
+      _first_nmethod = iter.method();
+    }
     _claimed_nmethod = (volatile nmethod*)_first_nmethod;
   }
 
@@ -5130,27 +5060,26 @@
 
   void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
     nmethod* first;
-    nmethod* last;
+    NMethodIterator last;
 
     do {
       *num_claimed_nmethods = 0;
 
-      first = last = (nmethod*)_claimed_nmethod;
+      first = (nmethod*)_claimed_nmethod;
+      last = NMethodIterator(first);
 
       if (first != NULL) {
+
         for (int i = 0; i < MaxClaimNmethods; i++) {
-          last = CodeCache::alive_nmethod(CodeCache::next(last));
-
-          if (last == NULL) {
+          if (!last.next_alive()) {
             break;
           }
-
-          claimed_nmethods[i] = last;
+          claimed_nmethods[i] = last.method();
           (*num_claimed_nmethods)++;
         }
       }
 
-    } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
+    } while ((nmethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
   }
 
   nmethod* claim_postponed_nmethod() {
@@ -6008,7 +5937,7 @@
     }
   }
 
-  release_gc_alloc_regions(n_workers, evacuation_info);
+  _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
   // Reset and re-enable the hot card cache.
@@ -6075,7 +6004,7 @@
 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
                                      FreeRegionList* free_list,
                                      bool par) {
-  assert(hr->startsHumongous(), "this is only for starts humongous regions");
+  assert(hr->is_starts_humongous(), "this is only for starts humongous regions");
   assert(free_list != NULL, "pre-condition");
 
   size_t hr_capacity = hr->capacity();
@@ -6088,7 +6017,7 @@
   uint i = hr->hrm_index() + 1;
   while (i < last_index) {
     HeapRegion* curr_hr = region_at(i);
-    assert(curr_hr->continuesHumongous(), "invariant");
+    assert(curr_hr->is_continues_humongous(), "invariant");
     curr_hr->clear_humongous();
     free_region(curr_hr, free_list, par);
     i += 1;
@@ -6114,10 +6043,7 @@
 }
 
 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
-  assert(_summary_bytes_used >= bytes,
-         err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
-                  _summary_bytes_used, bytes));
-  _summary_bytes_used -= bytes;
+  _allocator->decrease_used(bytes);
 }
 
 class G1ParCleanupCTTask : public AbstractGangTask {
@@ -6259,7 +6185,7 @@
   bool failures() { return _failures; }
 
   virtual bool doHeapRegion(HeapRegion* hr) {
-    if (hr->continuesHumongous()) return false;
+    if (hr->is_continues_humongous()) return false;
 
     bool result = _g1h->verify_bitmaps(_caller, hr);
     if (!result) {
@@ -6438,7 +6364,7 @@
   }
 
   virtual bool doHeapRegion(HeapRegion* r) {
-    if (!r->startsHumongous()) {
+    if (!r->is_starts_humongous()) {
       return false;
     }
 
@@ -6484,7 +6410,7 @@
 
       if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
         gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
-                               r->isHumongous(),
+                               r->is_humongous(),
                                region_idx,
                                r->rem_set()->occupied(),
                                r->rem_set()->strong_code_roots_list_length(),
@@ -6503,7 +6429,7 @@
 
     if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
       gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
-                             r->isHumongous(),
+                             r->is_humongous(),
                              r->bottom(),
                              region_idx,
                              r->region_num(),
@@ -6693,7 +6619,7 @@
       // We ignore young regions, we'll empty the young list afterwards.
       // We ignore humongous regions, we're not tearing down the
       // humongous regions set.
-      assert(r->is_free() || r->is_young() || r->isHumongous(),
+      assert(r->is_free() || r->is_young() || r->is_humongous(),
              "it cannot be another type");
     }
     return false;
@@ -6738,18 +6664,19 @@
   }
 
   bool doHeapRegion(HeapRegion* r) {
-    if (r->continuesHumongous()) {
+    if (r->is_continues_humongous()) {
       return false;
     }
 
     if (r->is_empty()) {
       // Add free regions to the free list
       r->set_free();
+      r->set_allocation_context(AllocationContext::system());
       _hrm->insert_into_free_list(r);
     } else if (!_free_list_only) {
       assert(!r->is_young(), "we should not come across young regions");
 
-      if (r->isHumongous()) {
+      if (r->is_humongous()) {
         // We ignore humongous regions, we left the humongous set unchanged
       } else {
         // Objects that were compacted would have ended up on regions
@@ -6781,12 +6708,12 @@
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
-    _summary_bytes_used = cl.total_used();
-  }
-  assert(_summary_bytes_used == recalculate_used(),
-         err_msg("inconsistent _summary_bytes_used, "
+    _allocator->set_used(cl.total_used());
+  }
+  assert(_allocator->used_unlocked() == recalculate_used(),
+         err_msg("inconsistent _allocator->used_unlocked(), "
                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
-                 _summary_bytes_used, recalculate_used()));
+                 _allocator->used_unlocked(), recalculate_used()));
 }
 
 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
@@ -6826,7 +6753,7 @@
   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
 
   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
-  _summary_bytes_used += allocated_bytes;
+  _allocator->increase_used(allocated_bytes);
   _hr_printer.retire(alloc_region);
   // We update the eden sizes here, when the region is retired,
   // instead of when it's allocated, since this is the point that its
@@ -6834,11 +6761,6 @@
   g1mm()->update_eden_size();
 }
 
-HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
-                                                    bool force) {
-  return _g1h->new_mutator_alloc_region(word_size, force);
-}
-
 void G1CollectedHeap::set_par_threads() {
   // Don't change the number of workers.  Use the value previously set
   // in the workgroup.
@@ -6855,11 +6777,6 @@
   set_par_threads(n_workers);
 }
 
-void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
-                                       size_t allocated_bytes) {
-  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
-}
-
 // Methods for the GC alloc regions
 
 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
@@ -6910,58 +6827,6 @@
   _hr_printer.retire(alloc_region);
 }
 
-HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
-                                                       bool force) {
-  assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
-}
-
-void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
-                                          size_t allocated_bytes) {
-  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
-                               GCAllocForSurvived);
-}
-
-HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
-                                                  bool force) {
-  assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
-}
-
-void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
-                                     size_t allocated_bytes) {
-  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
-                               GCAllocForTenured);
-}
-
-HeapRegion* OldGCAllocRegion::release() {
-  HeapRegion* cur = get();
-  if (cur != NULL) {
-    // Determine how far we are from the next card boundary. If it is smaller than
-    // the minimum object size we can allocate into, expand into the next card.
-    HeapWord* top = cur->top();
-    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
-
-    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
-
-    if (to_allocate_words != 0) {
-      // We are not at a card boundary. Fill up, possibly into the next, taking the
-      // end of the region and the minimum object size into account.
-      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
-                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
-
-      // Skip allocation if there is not enough space to allocate even the smallest
-      // possible object. In this case this region will not be retained, so the
-      // original problem cannot occur.
-      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
-        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
-        CollectedHeap::fill_with_object(dummy, to_allocate_words);
-      }
-    }
-  }
-  return G1AllocRegion::release();
-}
-
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
@@ -6982,13 +6847,13 @@
     _old_count(), _humongous_count(), _free_count(){ }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->continuesHumongous()) {
+    if (hr->is_continues_humongous()) {
       return false;
     }
 
     if (hr->is_young()) {
       // TODO
-    } else if (hr->startsHumongous()) {
+    } else if (hr->is_starts_humongous()) {
       assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
       _humongous_count.increment(1u, hr->capacity());
     } else if (hr->is_empty()) {
@@ -7069,7 +6934,7 @@
     if (!oopDesc::is_null(heap_oop)) {
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       HeapRegion* hr = _g1h->heap_region_containing(obj);
-      assert(!hr->continuesHumongous(),
+      assert(!hr->is_continues_humongous(),
              err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
                      " starting at "HR_FORMAT,
                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
@@ -7096,7 +6961,7 @@
     if (!oopDesc::is_null(heap_oop)) {
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       HeapRegion* hr = _g1h->heap_region_containing(obj);
-      assert(!hr->continuesHumongous(),
+      assert(!hr->is_continues_humongous(),
              err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
                      " starting at "HR_FORMAT,
                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
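
For orientation before the header changes below: the hunks above reroute mutator allocation, GC allocation and the used-bytes bookkeeping through the new _allocator object, keyed by an AllocationContext_t. The following standalone C++ sketch models that indirection with hypothetical names (ModelAllocator, Region, AllocationContextT); it illustrates the shape of the change only and is not HotSpot code.

#include <cstddef>
#include <iostream>
#include <map>
#include <utility>

typedef int AllocationContextT;            // stands in for AllocationContext_t

struct Region {                            // stands in for a HeapRegion
  size_t capacity;
  size_t top;
  explicit Region(size_t cap) : capacity(cap), top(0) {}
  // Bump-pointer allocation, single-threaded in this model.
  bool attempt_allocation(size_t words) {
    if (top + words > capacity) return false;
    top += words;
    return true;
  }
};

class ModelAllocator {                     // stands in for G1Allocator
  std::map<AllocationContextT, Region> _mutator_regions;
  size_t _used;                            // replaces the old _summary_bytes_used
public:
  ModelAllocator() : _used(0) {}
  Region* mutator_alloc_region(AllocationContextT ctx) {
    std::map<AllocationContextT, Region>::iterator it = _mutator_regions.find(ctx);
    if (it == _mutator_regions.end()) {
      it = _mutator_regions.insert(std::make_pair(ctx, Region(1024))).first;
    }
    return &it->second;
  }
  void increase_used(size_t bytes) { _used += bytes; }
  void decrease_used(size_t bytes) { _used -= bytes; }
  size_t used_unlocked() const     { return _used; }
};

int main() {
  ModelAllocator allocator;
  AllocationContextT ctx = 0;              // "AllocationContext::current()"
  if (allocator.mutator_alloc_region(ctx)->attempt_allocation(128)) {
    allocator.increase_used(128 * sizeof(void*));
  }
  std::cout << "used: " << allocator.used_unlocked() << std::endl;
  return 0;
}

Keeping the per-context regions and the used-bytes counter behind one object is what lets G1CollectedHeap::used(), used_unlocked() and decrement_summary_bytes() collapse into simple delegations in the hunks above.
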
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 
+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
@@ -80,12 +82,6 @@
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
 
-enum GCAllocPurpose {
-  GCAllocForTenured,
-  GCAllocForSurvived,
-  GCAllocPurposeCount
-};
-
 class YoungList : public CHeapObj<mtGC> {
 private:
   G1CollectedHeap* _g1h;
@@ -158,40 +154,6 @@
   void          print();
 };
 
-class MutatorAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  MutatorAllocRegion()
-    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
-};
-
-class SurvivorGCAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  SurvivorGCAllocRegion()
-  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
-};
-
-class OldGCAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  OldGCAllocRegion()
-  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
-
-  // This specialization of release() makes sure that the last card that has been
-  // allocated into has been completely filled by a dummy object.
-  // This avoids races when remembered set scanning wants to update the BOT of the
-  // last card in the retained old gc alloc region, and allocation threads
-  // allocating into that card at the same time.
-  virtual HeapRegion* release();
-};
-
 // The G1 STW is alive closure.
 // An instance is embedded into the G1CH and used as the
 // (optional) _is_alive_non_header closure in the STW
@@ -222,6 +184,9 @@
   friend class MutatorAllocRegion;
   friend class SurvivorGCAllocRegion;
   friend class OldGCAllocRegion;
+  friend class G1Allocator;
+  friend class G1DefaultAllocator;
+  friend class G1ResManAllocator;
 
   // Closures used in implementation.
   template <G1Barrier barrier, G1Mark do_mark_object>
@@ -232,6 +197,8 @@
   friend class G1ParScanClosureSuper;
   friend class G1ParEvacuateFollowersClosure;
   friend class G1ParTask;
+  friend class G1ParGCAllocator;
+  friend class G1DefaultParGCAllocator;
   friend class G1FreeGarbageRegionClosure;
   friend class RefineCardTableEntryClosure;
   friend class G1PrepareCompactClosure;
@@ -293,44 +260,18 @@
   // The sequence of all heap regions in the heap.
   HeapRegionManager _hrm;
 
-  // Alloc region used to satisfy mutator allocation requests.
-  MutatorAllocRegion _mutator_alloc_region;
+  // Class that handles the different kinds of allocations.
+  G1Allocator* _allocator;
 
-  // Alloc region used to satisfy allocation requests by the GC for
-  // survivor objects.
-  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+  // Statistics for each allocation context
+  AllocationContextStats _allocation_context_stats;
 
   // PLAB sizing policy for survivors.
   PLABStats _survivor_plab_stats;
 
-  // Alloc region used to satisfy allocation requests by the GC for
-  // old objects.
-  OldGCAllocRegion _old_gc_alloc_region;
-
   // PLAB sizing policy for tenured objects.
   PLABStats _old_plab_stats;
 
-  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
-    PLABStats* stats = NULL;
-
-    switch (purpose) {
-    case GCAllocForSurvived:
-      stats = &_survivor_plab_stats;
-      break;
-    case GCAllocForTenured:
-      stats = &_old_plab_stats;
-      break;
-    default:
-      assert(false, "unrecognized GCAllocPurpose");
-    }
-
-    return stats;
-  }
-
-  // The last old region we allocated to during the last GC.
-  // Typically, it is not full so we should re-use it during the next GC.
-  HeapRegion* _retained_old_gc_alloc_region;
-
   // It specifies whether we should attempt to expand the heap after a
   // region allocation failure. If heap expansion fails we set this to
   // false so that we don't re-attempt the heap expansion (it's likely
@@ -348,9 +289,6 @@
   // It initializes the GC alloc regions at the start of a GC.
   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 
-  // Setup the retained old gc alloc region as the currrent old gc alloc region.
-  void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
-
   // It releases the GC alloc regions at the end of a GC.
   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 
@@ -361,13 +299,6 @@
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;
 
-  // Determines PLAB size for a particular allocation purpose.
-  size_t desired_plab_sz(GCAllocPurpose purpose);
-
-  // Outside of GC pauses, the number of bytes used in all regions other
-  // than the current allocation region.
-  size_t _summary_bytes_used;
-
   // Records whether the region at the given index is kept live by roots or
   // references from the young generation.
   class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
@@ -526,11 +457,12 @@
   // humongous region.
   HeapWord* humongous_obj_allocate_initialize_regions(uint first,
                                                       uint num_regions,
-                                                      size_t word_size);
+                                                      size_t word_size,
+                                                      AllocationContext_t context);
 
   // Attempt to allocate a humongous object of the given size. Return
   // NULL if unsuccessful.
-  HeapWord* humongous_obj_allocate(size_t word_size);
+  HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);
 
   // The following two methods, allocate_new_tlab() and
   // mem_allocate(), are the two main entry points from the runtime
@@ -586,6 +518,7 @@
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
   HeapWord* attempt_allocation_slow(size_t word_size,
+                                    AllocationContext_t context,
                                     unsigned int* gc_count_before_ret,
                                     int* gclocker_retry_count_ret);
 
@@ -600,7 +533,8 @@
   // specifies whether the mutator alloc region is expected to be NULL
   // or not.
   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
-                                       bool expect_null_mutator_alloc_region);
+                                            AllocationContext_t context,
+                                            bool expect_null_mutator_alloc_region);
 
   // It dirties the cards that cover the block so that the post
   // write barrier never queues anything when updating objects on this
@@ -612,7 +546,9 @@
   // allocation region, either by picking one or expanding the
   // heap, and then allocate a block of the given size. The block
   // may not be a humongous object - it must fit into a single heap region.
-  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
+  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
+                                   size_t word_size,
+                                   AllocationContext_t context);
 
   HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
                                     HeapRegion*    alloc_region,
@@ -624,10 +560,12 @@
   void par_allocate_remaining_space(HeapRegion* r);
 
   // Allocation attempt during GC for a survivor object / PLAB.
-  inline HeapWord* survivor_attempt_allocation(size_t word_size);
+  inline HeapWord* survivor_attempt_allocation(size_t word_size,
+                                               AllocationContext_t context);
 
   // Allocation attempt during GC for an old object / PLAB.
-  inline HeapWord* old_attempt_allocation(size_t word_size);
+  inline HeapWord* old_attempt_allocation(size_t word_size,
+                                          AllocationContext_t context);
 
   // These methods are the "callbacks" from the G1AllocRegion class.
 
@@ -666,13 +604,15 @@
   // Callback from VM_G1CollectForAllocation operation.
   // This function does everything necessary/possible to satisfy a
   // failed allocation request (including collection, expansion, etc.)
-  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
+  HeapWord* satisfy_failed_allocation(size_t word_size,
+                                      AllocationContext_t context,
+                                      bool* succeeded);
 
   // Attempting to expand the heap sufficiently
   // to support an allocation of the given "word_size".  If
   // successful, perform the allocation and return the address of the
   // allocated block, or else "NULL".
-  HeapWord* expand_and_allocate(size_t word_size);
+  HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 
   // Process any reference objects discovered during
   // an incremental evacuation pause.
@@ -684,6 +624,10 @@
 
 public:
 
+  G1Allocator* allocator() {
+    return _allocator;
+  }
+
   G1MonitoringSupport* g1mm() {
     assert(_g1mm != NULL, "should have been initialized");
     return _g1mm;
@@ -695,6 +639,29 @@
   // (Rounds up to a HeapRegion boundary.)
   bool expand(size_t expand_bytes);
 
+  // Returns the PLAB statistics given a purpose.
+  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
+    PLABStats* stats = NULL;
+
+    switch (purpose) {
+    case GCAllocForSurvived:
+      stats = &_survivor_plab_stats;
+      break;
+    case GCAllocForTenured:
+      stats = &_old_plab_stats;
+      break;
+    default:
+      assert(false, "unrecognized GCAllocPurpose");
+    }
+
+    return stats;
+  }
+
+  // Determines PLAB size for a particular allocation purpose.
+  size_t desired_plab_sz(GCAllocPurpose purpose);
+
+  inline AllocationContextStats& allocation_context_stats();
+
   // Do anything common to GC's.
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
@@ -1272,7 +1239,7 @@
   // Determine whether the given region is one that we are using as an
   // old GC alloc region.
   bool is_old_gc_alloc_region(HeapRegion* hr) {
-    return hr == _retained_old_gc_alloc_region;
+    return _allocator->is_retained_old_region(hr);
   }
 
   // Perform a collection of the heap; intended for use in implementing
@@ -1283,6 +1250,11 @@
   // The same as above but assume that the caller holds the Heap_lock.
   void collect_locked(GCCause::Cause cause);
 
+  virtual void copy_allocation_context_stats(const jint* contexts,
+                                             jlong* totals,
+                                             jbyte* accuracy,
+                                             jint len);
+
   // True iff an evacuation has failed in the most-recent collection.
   bool evacuation_failed() { return _evacuation_failed; }
 
@@ -1540,7 +1512,7 @@
   virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
 
   // Returns "true" iff the given word_size is "very large".
-  static bool isHumongous(size_t word_size) {
+  static bool is_humongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs
     // are capped at the humongous threshold and we want to
     // ensure that we don't try to allocate a TLAB as
@@ -1747,28 +1719,4 @@
   size_t _max_heap_capacity;
 };
 
-class G1ParGCAllocBuffer: public ParGCAllocBuffer {
-private:
-  bool        _retired;
-
-public:
-  G1ParGCAllocBuffer(size_t gclab_word_size);
-  virtual ~G1ParGCAllocBuffer() {
-    guarantee(_retired, "Allocation buffer has not been retired");
-  }
-
-  virtual void set_buf(HeapWord* buf) {
-    ParGCAllocBuffer::set_buf(buf);
-    _retired = false;
-  }
-
-  virtual void retire(bool end_of_gc, bool retain) {
-    if (_retired) {
-      return;
-    }
-    ParGCAllocBuffer::retire(end_of_gc, retain);
-    _retired = true;
-  }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -37,14 +37,18 @@
 
 // Inline functions for G1CollectedHeap
 
+inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
+  return _allocation_context_stats;
+}
+
 // Return the region with the given index. It assumes the index is valid.
 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
 
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
   assert(is_in_reserved(addr),
          err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
-                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
-  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
+                 p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end())));
+  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
 }
 
 inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
@@ -63,7 +67,7 @@
 template <class T>
 inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
   HeapRegion* hr = heap_region_containing_raw(addr);
-  if (hr->continuesHumongous()) {
+  if (hr->is_continues_humongous()) {
     return hr->humongous_start_region();
   }
   return hr;
@@ -95,13 +99,15 @@
                                                      unsigned int* gc_count_before_ret,
                                                      int* gclocker_retry_count_ret) {
   assert_heap_not_locked_and_not_at_safepoint();
-  assert(!isHumongous(word_size), "attempt_allocation() should not "
+  assert(!is_humongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
 
-  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
-                                                      false /* bot_updates */);
+  AllocationContext_t context = AllocationContext::current();
+  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+                                                                                   false /* bot_updates */);
   if (result == NULL) {
     result = attempt_allocation_slow(word_size,
+                                     context,
                                      gc_count_before_ret,
                                      gclocker_retry_count_ret);
   }
@@ -112,17 +118,17 @@
   return result;
 }
 
-inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
-                                                              word_size) {
-  assert(!isHumongous(word_size),
+inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
+                                                              AllocationContext_t context) {
+  assert(!is_humongous(word_size),
          "we should not be seeing humongous-size allocations in this path");
 
-  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
-                                                      false /* bot_updates */);
+  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                                       false /* bot_updates */);
   if (result == NULL) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
-                                                      false /* bot_updates */);
+    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                      false /* bot_updates */);
   }
   if (result != NULL) {
     dirty_young_block(result, word_size);
@@ -130,16 +136,17 @@
   return result;
 }
 
-inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
-  assert(!isHumongous(word_size),
+inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
+                                                         AllocationContext_t context) {
+  assert(!is_humongous(word_size),
          "we should not be seeing humongous-size allocations in this path");
 
-  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
-                                                       true /* bot_updates */);
+  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                                  true /* bot_updates */);
   if (result == NULL) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
-                                                       true /* bot_updates */);
+    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                 true /* bot_updates */);
   }
   return result;
 }
@@ -159,7 +166,7 @@
   assert(word_size > 0, "pre-condition");
   assert(containing_hr->is_in(start), "it should contain start");
   assert(containing_hr->is_young(), "it should be young");
-  assert(!containing_hr->isHumongous(), "it should not be humongous");
+  assert(!containing_hr->is_humongous(), "it should not be humongous");
 
   HeapWord* end = start + word_size;
   assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
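
The inline.hpp hunks above keep the existing two-step shape for GC allocations: an unlocked attempt on the current region, then a retry under FreeList_lock. Below is a minimal standalone C++ sketch of that pattern with hypothetical names (BumpRegion, TwoStepAllocator) and std::atomic/std::mutex standing in for HotSpot's primitives; it is an illustration, not the G1AllocRegion implementation.

#include <atomic>
#include <cstddef>
#include <mutex>

class BumpRegion {
  std::atomic<size_t> _top;
  size_t _capacity;
public:
  explicit BumpRegion(size_t capacity) : _top(0), _capacity(capacity) {}
  // Lock-free fast path: claim 'words' with a CAS loop, or fail if full.
  bool attempt_allocation(size_t words, size_t* offset_out) {
    size_t old_top = _top.load(std::memory_order_relaxed);
    do {
      if (old_top + words > _capacity) return false;   // region is full
    } while (!_top.compare_exchange_weak(old_top, old_top + words));
    *offset_out = old_top;
    return true;
  }
};

class TwoStepAllocator {
  BumpRegion _region;
  std::mutex _lock;        // stands in for FreeList_lock
public:
  TwoStepAllocator() : _region(1024) {}
  bool allocate(size_t words, size_t* offset_out) {
    if (_region.attempt_allocation(words, offset_out)) {
      return true;                                     // unlocked fast path
    }
    std::lock_guard<std::mutex> guard(_lock);
    // Slow path: in G1 this is attempt_allocation_locked(), which may also
    // retire the full region and install a fresh one before retrying.
    return _region.attempt_allocation(words, offset_out);
  }
};

int main() {
  TwoStepAllocator alloc;
  size_t offset = 0;
  return alloc.allocate(16, &offset) ? 0 : 1;
}

The fast path stays lock-free for the common case; the lock is only taken when the current region cannot satisfy the request, which in G1 is also the point where a new region may be installed.
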
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+
+void G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
+                                                    jlong* totals,
+                                                    jbyte* accuracy,
+                                                    jint len) {
+}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -192,7 +192,7 @@
     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
     bool during_conc_mark = _g1h->mark_in_progress();
 
-    assert(!hr->isHumongous(), "sanity");
+    assert(!hr->is_humongous(), "sanity");
     assert(hr->in_collection_set(), "bad CS");
 
     if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -43,9 +43,7 @@
     _hot_cache_idx = 0;
 
     // For refining the cards in the hot cache in parallel
-    uint n_workers = (ParallelGCThreads > 0 ?
-                        _g1h->workers()->total_workers() : 1);
-    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
+    _hot_cache_par_chunk_size = (ParallelGCThreads > 0 ? ClaimChunkSize : _hot_cache_size);
     _hot_cache_par_claimed_idx = 0;
 
     _card_counts.initialize(card_counts_storage);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -70,6 +70,9 @@
 
   G1CardCounts _card_counts;
 
+  // The number of cached cards a thread claims when flushing the cache
+  static const int ClaimChunkSize = 32;
+
   bool default_use_cache() const {
     return (G1ConcRSLogCacheSize > 0);
   }
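
The g1HotCardCache change above replaces the worker-count-derived chunk size with a fixed ClaimChunkSize of 32 cached cards. Below is a standalone C++ sketch of fixed-chunk claiming over a shared atomic index; the names (drain_cache, claimed_idx) are hypothetical and the code models the scheme rather than reproducing the HotSpot implementation.

#include <atomic>
#include <cstdio>
#include <vector>

static const int ClaimChunkSize = 32;   // fixed chunk, independent of worker count

void drain_cache(const std::vector<int>& hot_cache,
                 std::atomic<int>& claimed_idx) {
  while (true) {
    // Each worker atomically claims the next chunk of indices.
    int start = claimed_idx.fetch_add(ClaimChunkSize);
    if (start >= (int)hot_cache.size()) break;         // nothing left to claim
    int end = start + ClaimChunkSize;
    if (end > (int)hot_cache.size()) end = (int)hot_cache.size();
    for (int i = start; i < end; i++) {
      // Refine hot_cache[i] here; this model just prints it.
      std::printf("refining card %d\n", hot_cache[i]);
    }
  }
}

int main() {
  std::vector<int> cache(100);
  for (int i = 0; i < (int)cache.size(); i++) cache[i] = i;
  std::atomic<int> claimed(0);
  drain_cache(cache, claimed);   // each GC worker thread would call this
  return 0;
}
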
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -193,76 +193,6 @@
   gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
 }
 
-class G1PrepareCompactClosure: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  ModRefBarrierSet* _mrbs;
-  CompactPoint _cp;
-  HeapRegionSetCount _humongous_regions_removed;
-
-  bool is_cp_initialized() const {
-    return _cp.space != NULL;
-  }
-
-  void prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
-    // If this is the first live region that we came across which we can compact,
-    // initialize the CompactPoint.
-    if (!is_cp_initialized()) {
-      _cp.space = hr;
-      _cp.threshold = hr->initialize_threshold();
-    }
-    hr->prepare_for_compaction(&_cp);
-    // Also clear the part of the card table that will be unused after
-    // compaction.
-    _mrbs->clear(MemRegion(hr->compaction_top(), end));
-  }
-
-  void free_humongous_region(HeapRegion* hr) {
-    HeapWord* end = hr->end();
-    FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
-
-    assert(hr->startsHumongous(),
-           "Only the start of a humongous region should be freed.");
-
-    hr->set_containing_set(NULL);
-    _humongous_regions_removed.increment(1u, hr->capacity());
-
-    _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
-    prepare_for_compaction(hr, end);
-    dummy_free_list.remove_all();
-  }
-
-public:
-  G1PrepareCompactClosure()
-  : _g1h(G1CollectedHeap::heap()),
-    _mrbs(_g1h->g1_barrier_set()),
-    _cp(NULL),
-    _humongous_regions_removed() { }
-
-  void update_sets() {
-    // We'll recalculate total used bytes and recreate the free list
-    // at the end of the GC, so no point in updating those values here.
-    HeapRegionSetCount empty_set;
-    _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
-  }
-
-  bool doHeapRegion(HeapRegion* hr) {
-    if (hr->isHumongous()) {
-      if (hr->startsHumongous()) {
-        oop obj = oop(hr->bottom());
-        if (obj->is_gc_marked()) {
-          obj->forward_to(obj);
-        } else  {
-          free_humongous_region(hr);
-        }
-      } else {
-        assert(hr->continuesHumongous(), "Invalid humongous.");
-      }
-    } else {
-      prepare_for_compaction(hr, hr->end());
-    }
-    return false;
-  }
-};
 
 void G1MarkSweep::mark_sweep_phase2() {
   // Now all live objects are marked, compute the new object addresses.
@@ -271,21 +201,17 @@
   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
   // tracking expects us to do so. See comment under phase4.
 
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
   GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
   GenMarkSweep::trace("2");
 
-  G1PrepareCompactClosure blk;
-  g1h->heap_region_iterate(&blk);
-  blk.update_sets();
+  prepare_compaction();
 }
 
 class G1AdjustPointersClosure: public HeapRegionClosure {
  public:
   bool doHeapRegion(HeapRegion* r) {
-    if (r->isHumongous()) {
-      if (r->startsHumongous()) {
+    if (r->is_humongous()) {
+      if (r->is_starts_humongous()) {
         // We must adjust the pointers on the single H object.
         oop obj = oop(r->bottom());
         // point all the oops to the new location
@@ -340,8 +266,8 @@
   G1SpaceCompactClosure() {}
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->isHumongous()) {
-      if (hr->startsHumongous()) {
+    if (hr->is_humongous()) {
+      if (hr->is_starts_humongous()) {
         oop obj = oop(hr->bottom());
         if (obj->is_gc_marked()) {
           obj->init_mark();
@@ -373,3 +299,68 @@
   g1h->heap_region_iterate(&blk);
 
 }
+
+void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  g1h->heap_region_iterate(blk);
+  blk->update_sets();
+}
+
+void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
+  HeapWord* end = hr->end();
+  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
+
+  assert(hr->is_starts_humongous(),
+         "Only the start of a humongous region should be freed.");
+
+  hr->set_containing_set(NULL);
+  _humongous_regions_removed.increment(1u, hr->capacity());
+
+  _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
+  prepare_for_compaction(hr, end);
+  dummy_free_list.remove_all();
+}
+
+void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
+  // If this is the first live region that we came across which we can compact,
+  // initialize the CompactPoint.
+  if (!is_cp_initialized()) {
+    _cp.space = hr;
+    _cp.threshold = hr->initialize_threshold();
+  }
+  prepare_for_compaction_work(&_cp, hr, end);
+}
+
+void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
+                                                          HeapRegion* hr,
+                                                          HeapWord* end) {
+  hr->prepare_for_compaction(cp);
+  // Also clear the part of the card table that will be unused after
+  // compaction.
+  _mrbs->clear(MemRegion(hr->compaction_top(), end));
+}
+
+void G1PrepareCompactClosure::update_sets() {
+  // We'll recalculate total used bytes and recreate the free list
+  // at the end of the GC, so no point in updating those values here.
+  HeapRegionSetCount empty_set;
+  _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
+}
+
+bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
+  if (hr->is_humongous()) {
+    if (hr->is_starts_humongous()) {
+      oop obj = oop(hr->bottom());
+      if (obj->is_gc_marked()) {
+        obj->forward_to(obj);
+      } else  {
+        free_humongous_region(hr);
+      }
+    } else {
+      assert(hr->is_continues_humongous(), "Invalid humongous.");
+    }
+  } else {
+    prepare_for_compaction(hr, hr->end());
+  }
+  return false;
+}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -43,7 +43,7 @@
 // compaction.
 //
 // Class unloading will only occur when a full gc is invoked.
-
+class G1PrepareCompactClosure;
 
 class G1MarkSweep : AllStatic {
   friend class VM_G1MarkSweep;
@@ -70,6 +70,30 @@
   static void mark_sweep_phase4();
 
   static void allocate_stacks();
+  static void prepare_compaction();
+  static void prepare_compaction_work(G1PrepareCompactClosure* blk);
+};
+
+class G1PrepareCompactClosure : public HeapRegionClosure {
+ protected:
+  G1CollectedHeap* _g1h;
+  ModRefBarrierSet* _mrbs;
+  CompactPoint _cp;
+  HeapRegionSetCount _humongous_regions_removed;
+
+  virtual void prepare_for_compaction(HeapRegion* hr, HeapWord* end);
+  void prepare_for_compaction_work(CompactPoint* cp, HeapRegion* hr, HeapWord* end);
+  void free_humongous_region(HeapRegion* hr);
+  bool is_cp_initialized() const { return _cp.space != NULL; }
+
+ public:
+  G1PrepareCompactClosure() :
+    _g1h(G1CollectedHeap::heap()),
+    _mrbs(_g1h->g1_barrier_set()),
+    _humongous_regions_removed() { }
+
+  void update_sets();
+  bool doHeapRegion(HeapRegion* hr);
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP
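Hoisting G1PrepareCompactClosure into the header, with protected state and a virtual prepare_for_compaction(), is what makes it subclassable from outside g1MarkSweep.cpp. A hypothetical extension (the class name and the extra counter are invented for illustration):

// Hypothetical subclass hooking the per-region preparation step; only the
// override point itself comes from this changeset.
class MyPrepareCompactClosure : public G1PrepareCompactClosure {
  size_t _regions_prepared;
 protected:
  virtual void prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
    _regions_prepared++;                                   // invented bookkeeping
    G1PrepareCompactClosure::prepare_for_compaction(hr, end);
  }
 public:
  MyPrepareCompactClosure() : _regions_prepared(0) { }
  size_t regions_prepared() const { return _regions_prepared; }
};

A replacement g1MarkSweep_ext.cpp would then instantiate this subclass in G1MarkSweep::prepare_compaction() and pass it to prepare_compaction_work(), mirroring the default file added below.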
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep_ext.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1MarkSweep.hpp"
+
+void G1MarkSweep::prepare_compaction() {
+  G1PrepareCompactClosure blk;
+  G1MarkSweep::prepare_compaction_work(&blk);
+}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -38,11 +38,8 @@
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
-    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
-    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
     _age_table(false), _scanner(g1h, rp),
-    _strong_roots_time(0), _term_time(0),
-    _alloc_buffer_waste(0), _undo_waste(0) {
+    _strong_roots_time(0), _term_time(0) {
   _scanner.set_par_scan_thread_state(this);
   // we allocate G1YoungSurvRateNumRegions plus one entries, since
   // we "sacrifice" entry 0 to keep track of surviving bytes for
@@ -60,14 +57,14 @@
   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
 
-  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
-  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
+  _g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
 
   _start = os::elapsedTime();
 }
 
 G1ParScanThreadState::~G1ParScanThreadState() {
-  retire_alloc_buffers();
+  _g1_par_allocator->retire_alloc_buffers();
+  delete _g1_par_allocator;
   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
 }
 
@@ -90,14 +87,16 @@
   const double elapsed_ms = elapsed_time() * 1000.0;
   const double s_roots_ms = strong_roots_time() * 1000.0;
   const double term_ms    = term_time() * 1000.0;
+  const size_t alloc_buffer_waste = _g1_par_allocator->alloc_buffer_waste();
+  const size_t undo_waste         = _g1_par_allocator->undo_waste();
   st->print_cr("%3d %9.2f %9.2f %6.2f "
                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
-               (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
-               alloc_buffer_waste() * HeapWordSize / K,
-               undo_waste() * HeapWordSize / K);
+               (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
+               alloc_buffer_waste * HeapWordSize / K,
+               undo_waste * HeapWordSize / K);
 }
 
 #ifdef ASSERT
@@ -164,12 +163,13 @@
                                            : m->age();
   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                              word_sz);
-  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
+  AllocationContext_t context = from_region->allocation_context();
+  HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
 #ifndef PRODUCT
   // Should this evacuation fail?
   if (_g1h->evacuation_should_fail()) {
     if (obj_ptr != NULL) {
-      undo_allocation(alloc_purpose, obj_ptr, word_sz);
+      _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
       obj_ptr = NULL;
     }
   }
@@ -246,66 +246,8 @@
       obj->oop_iterate_backwards(&_scanner);
     }
   } else {
-    undo_allocation(alloc_purpose, obj_ptr, word_sz);
+    _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
     obj = forward_ptr;
   }
   return obj;
 }
-
-HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
-  HeapWord* obj = NULL;
-  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
-  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-    alloc_buf->retire(false /* end_of_gc */, false /* retain */);
-
-    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
-    if (buf == NULL) {
-      return NULL; // Let caller handle allocation failure.
-    }
-    // Otherwise.
-    alloc_buf->set_word_size(gclab_word_size);
-    alloc_buf->set_buf(buf);
-
-    obj = alloc_buf->allocate(word_sz);
-    assert(obj != NULL, "buffer was definitely big enough...");
-  } else {
-    obj = _g1h->par_allocate_during_gc(purpose, word_sz);
-  }
-  return obj;
-}
-
-void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
-  if (alloc_buffer(purpose)->contains(obj)) {
-    assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
-           "should contain whole object");
-    alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-  } else {
-    CollectedHeap::fill_with_object(obj, word_sz);
-    add_to_undo_waste(word_sz);
-  }
-}
-
-HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
-  HeapWord* obj = NULL;
-  if (purpose == GCAllocForSurvived) {
-    obj = alloc_buffer(GCAllocForSurvived)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
-  } else {
-    obj = alloc_buffer(GCAllocForTenured)->allocate(word_sz);
-  }
-  if (obj != NULL) {
-    return obj;
-  }
-  return allocate_slow(purpose, word_sz);
-}
-
-void G1ParScanThreadState::retire_alloc_buffers() {
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    size_t waste = _alloc_buffers[ap]->words_remaining();
-    add_to_alloc_buffer_waste(waste);
-    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
-                                               true /* end_of_gc */,
-                                               false /* retain */);
-  }
-}
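All PLAB handling removed from this file now sits behind G1ParGCAllocator. From the call sites above, the allocator is expected to expose at least the operations below; this is an interface sketch inferred from those uses, not the declaration actually added elsewhere in this changeset:

// Inferred interface sketch only; the real class is declared in the new
// G1 allocator files and may differ in bases, virtuality and extra members.
class G1ParGCAllocator {
 public:
  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);

  virtual HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz,
                             AllocationContext_t context) = 0;
  virtual void undo_allocation(GCAllocPurpose purpose, HeapWord* obj,
                               size_t word_sz, AllocationContext_t context) = 0;
  virtual void retire_alloc_buffers() = 0;

  size_t alloc_buffer_waste() const;
  size_t undo_waste() const;
};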
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -46,9 +46,8 @@
   G1SATBCardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
-  G1ParGCAllocBuffer  _surviving_alloc_buffer;
-  G1ParGCAllocBuffer  _tenured_alloc_buffer;
-  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+  G1ParGCAllocator*   _g1_par_allocator;
+
   ageTable            _age_table;
 
   G1ParScanClosure    _scanner;
@@ -78,7 +77,6 @@
 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
 
   void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
-
   void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
 
   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
@@ -90,13 +88,6 @@
 
   ageTable*         age_table()       { return &_age_table;       }
 
-  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
-    return _alloc_buffers[purpose];
-  }
-
-  size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
-  size_t undo_waste() const                      { return _undo_waste; }
-
 #ifdef ASSERT
   bool queue_is_empty() const { return _refs->is_empty(); }
 
@@ -110,7 +101,7 @@
     _refs->push(ref);
   }
 
-  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
+  template <class T> void update_rs(HeapRegion* from, T* p, uint tid) {
     // If the new value of the field points to the same region or
     // is the to-space, we don't need to include it in the Rset updates.
     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
@@ -121,12 +112,6 @@
       }
     }
   }
- private:
-
-  inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
-  inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
-  inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
-
  public:
 
   void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
@@ -172,8 +157,6 @@
   }
 
  private:
-  void retire_alloc_buffers();
-
   #define G1_PARTIAL_ARRAY_MASK 0x2
 
   inline bool has_partial_array_mask(oop* ref) const {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -413,7 +413,7 @@
     _ctbs(_g1h->g1_barrier_set()) {}
 
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->continuesHumongous()) {
+    if (!r->is_continues_humongous()) {
       r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
     }
     return false;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -119,7 +119,7 @@
   // Record, if necessary, the fact that *p (where "p" is in region "from",
   // which is required to be non-NULL) has changed to a new non-NULL value.
   template <class T> void write_ref(HeapRegion* from, T* p);
-  template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);
+  template <class T> void par_write_ref(HeapRegion* from, T* p, uint tid);
 
   // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
   // or card, respectively, such that a region or card with a corresponding
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -44,7 +44,7 @@
 }
 
 template <class T>
-inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
+inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, uint tid) {
   oop obj = oopDesc::load_decode_heap_oop(p);
   if (obj == NULL) {
     return;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -263,7 +263,7 @@
       current = &_free;
     } else if (r->is_young()) {
       current = &_young;
-    } else if (r->isHumongous()) {
+    } else if (r->is_humongous()) {
       current = &_humonguous;
     } else if (r->is_old()) {
       current = &_old;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -28,6 +28,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionBounds.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/shared/liveRange.hpp"
@@ -138,32 +139,16 @@
   }
 }
 
-// Minimum region size; we won't go lower than that.
-// We might want to decrease this in the future, to deal with small
-// heaps a bit more efficiently.
-#define MIN_REGION_SIZE  (      1024 * 1024 )
-
-// Maximum region size; we don't go higher than that. There's a good
-// reason for having an upper bound. We don't want regions to get too
-// large, otherwise cleanup's effectiveness would decrease as there
-// will be fewer opportunities to find totally empty regions after
-// marking.
-#define MAX_REGION_SIZE  ( 32 * 1024 * 1024 )
-
-// The automatic region size calculation will try to have around this
-// many regions in the heap (based on the min heap size).
-#define TARGET_REGION_NUMBER          2048
-
 size_t HeapRegion::max_region_size() {
-  return (size_t)MAX_REGION_SIZE;
+  return HeapRegionBounds::max_size();
 }
 
 void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   uintx region_size = G1HeapRegionSize;
   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
     size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
-    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
-                       (uintx) MIN_REGION_SIZE);
+    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
+                       (uintx) HeapRegionBounds::min_size());
   }
 
   int region_size_log = log2_long((jlong) region_size);
@@ -173,10 +158,10 @@
   region_size = ((uintx)1 << region_size_log);
 
   // Now make sure that we don't go over or under our limits.
-  if (region_size < MIN_REGION_SIZE) {
-    region_size = MIN_REGION_SIZE;
-  } else if (region_size > MAX_REGION_SIZE) {
-    region_size = MAX_REGION_SIZE;
+  if (region_size < HeapRegionBounds::min_size()) {
+    region_size = HeapRegionBounds::min_size();
+  } else if (region_size > HeapRegionBounds::max_size()) {
+    region_size = HeapRegionBounds::max_size();
   }
 
   // And recalculate the log.
@@ -213,11 +198,12 @@
 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
   assert(_humongous_start_region == NULL,
          "we should have already filtered out humongous regions");
-  assert(_end == _orig_end,
+  assert(_end == orig_end(),
          "we should have already filtered out humongous regions");
 
   _in_collection_set = false;
 
+  set_allocation_context(AllocationContext::system());
   set_young_index_in_cset(-1);
   uninstall_surv_rate_group();
   set_free();
@@ -264,9 +250,9 @@
   _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 }
 
-void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
-  assert(!isHumongous(), "sanity / pre-condition");
-  assert(end() == _orig_end,
+void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) {
+  assert(!is_humongous(), "sanity / pre-condition");
+  assert(end() == orig_end(),
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
   assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
@@ -278,30 +264,30 @@
   _offsets.set_for_starts_humongous(new_top);
 }
 
-void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
-  assert(!isHumongous(), "sanity / pre-condition");
-  assert(end() == _orig_end,
+void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
+  assert(!is_humongous(), "sanity / pre-condition");
+  assert(end() == orig_end(),
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
-  assert(first_hr->startsHumongous(), "pre-condition");
+  assert(first_hr->is_starts_humongous(), "pre-condition");
 
   _type.set_continues_humongous();
   _humongous_start_region = first_hr;
 }
 
 void HeapRegion::clear_humongous() {
-  assert(isHumongous(), "pre-condition");
+  assert(is_humongous(), "pre-condition");
 
-  if (startsHumongous()) {
+  if (is_starts_humongous()) {
     assert(top() <= end(), "pre-condition");
-    set_end(_orig_end);
+    set_end(orig_end());
     if (top() > end()) {
       // at least one "continues humongous" region after it
       set_top(end());
     }
   } else {
     // continues humongous
-    assert(end() == _orig_end, "sanity");
+    assert(end() == orig_end(), "sanity");
   }
 
   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
@@ -324,9 +310,10 @@
                        MemRegion mr) :
     G1OffsetTableContigSpace(sharedOffsetArray, mr),
     _hrm_index(hrm_index),
+    _allocation_context(AllocationContext::system()),
     _humongous_start_region(NULL),
     _in_collection_set(false),
-    _next_in_special_set(NULL), _orig_end(NULL),
+    _next_in_special_set(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
     _next_young_region(NULL),
@@ -349,10 +336,14 @@
 
   G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
 
-  _orig_end = mr.end();
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
   record_top_and_timestamp();
+
+  assert(mr.end() == orig_end(),
+         err_msg("Given region end address " PTR_FORMAT " should match exactly "
+                 "bottom plus one region size, i.e. " PTR_FORMAT,
+                 p2i(mr.end()), p2i(orig_end())));
 }
 
 CompactibleSpace* HeapRegion::next_compaction_space() const {
@@ -663,7 +654,7 @@
     return;
   }
 
-  if (continuesHumongous()) {
+  if (is_continues_humongous()) {
     if (strong_code_roots_length > 0) {
       gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
                              "region but has "SIZE_FORMAT" code root entries",
@@ -683,6 +674,8 @@
 
 void HeapRegion::print() const { print_on(gclog_or_tty); }
 void HeapRegion::print_on(outputStream* st) const {
+  st->print("AC%4u", allocation_context());
+
   st->print(" %2s", get_short_type_str());
   if (in_collection_set())
     st->print(" CS");
@@ -788,7 +781,7 @@
         HeapRegion* to   = _g1h->heap_region_containing(obj);
         if (from != NULL && to != NULL &&
             from != to &&
-            !to->isHumongous()) {
+            !to->is_humongous()) {
           jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
           jbyte cv_field = *_bs->byte_for_const(p);
           const jbyte dirty = CardTableModRefBS::dirty_card_val();
@@ -842,19 +835,19 @@
   HeapWord* p = bottom();
   HeapWord* prev_p = NULL;
   VerifyLiveClosure vl_cl(g1, vo);
-  bool is_humongous = isHumongous();
+  bool is_region_humongous = is_humongous();
   size_t object_num = 0;
   while (p < top()) {
     oop obj = oop(p);
     size_t obj_size = block_size(p);
     object_num += 1;
 
-    if (is_humongous != g1->isHumongous(obj_size) &&
+    if (is_region_humongous != g1->is_humongous(obj_size) &&
         !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
       gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                              SIZE_FORMAT" words) in a %shumongous region",
-                             p, g1->isHumongous(obj_size) ? "" : "non-",
-                             obj_size, is_humongous ? "" : "non-");
+                             p, g1->is_humongous(obj_size) ? "" : "non-",
+                             obj_size, is_region_humongous ? "" : "non-");
        *failures = true;
        return;
     }
@@ -963,7 +956,7 @@
     }
   }
 
-  if (is_humongous && object_num > 1) {
+  if (is_region_humongous && object_num > 1) {
     gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                            "but has "SIZE_FORMAT", objects",
                            bottom(), end(), object_num);
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
 
+#include "gc_implementation/g1/g1AllocationContext.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
 #include "gc_implementation/g1/heapRegionType.hpp"
@@ -222,13 +223,12 @@
   // The index of this region in the heap region sequence.
   uint  _hrm_index;
 
+  AllocationContext_t _allocation_context;
+
   HeapRegionType _type;
 
   // For a humongous region, region in which it starts.
   HeapRegion* _humongous_start_region;
-  // For the start region of a humongous sequence, it's original end().
-  HeapWord* _orig_end;
-
   // True iff the region is in current collection_set.
   bool _in_collection_set;
 
@@ -417,9 +417,9 @@
   bool is_eden()     const { return _type.is_eden();     }
   bool is_survivor() const { return _type.is_survivor(); }
 
-  bool isHumongous() const { return _type.is_humongous(); }
-  bool startsHumongous() const { return _type.is_starts_humongous(); }
-  bool continuesHumongous() const { return _type.is_continues_humongous();   }
+  bool is_humongous() const { return _type.is_humongous(); }
+  bool is_starts_humongous() const { return _type.is_starts_humongous(); }
+  bool is_continues_humongous() const { return _type.is_continues_humongous();   }
 
   bool is_old() const { return _type.is_old(); }
 
@@ -431,10 +431,10 @@
   // Return the number of distinct regions that are covered by this region:
   // 1 if the region is not humongous, >= 1 if the region is humongous.
   uint region_num() const {
-    if (!isHumongous()) {
+    if (!is_humongous()) {
       return 1U;
     } else {
-      assert(startsHumongous(), "doesn't make sense on HC regions");
+      assert(is_starts_humongous(), "doesn't make sense on HC regions");
       assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
       return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
     }
@@ -443,7 +443,7 @@
   // Return the index + 1 of the last HC regions that's associated
   // with this HS region.
   uint last_hc_index() const {
-    assert(startsHumongous(), "don't call this otherwise");
+    assert(is_starts_humongous(), "don't call this otherwise");
     return hrm_index() + region_num();
   }
 
@@ -452,7 +452,7 @@
   // their _end set up to be the end of the last continues region of the
   // corresponding humongous object.
   bool is_in_reserved_raw(const void* p) const {
-    return _bottom <= p && p < _orig_end;
+    return _bottom <= p && p < orig_end();
   }
 
   // Makes the current region be a "starts humongous" region, i.e.,
@@ -478,12 +478,12 @@
   // humongous regions can be calculated by just looking at the
   // "starts humongous" regions and by ignoring the "continues
   // humongous" regions.
-  void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
+  void set_starts_humongous(HeapWord* new_top, HeapWord* new_end);
 
   // Makes the current region be a "continues humongous'
   // region. first_hr is the "start humongous" region of the series
   // which this region will be part of.
-  void set_continuesHumongous(HeapRegion* first_hr);
+  void set_continues_humongous(HeapRegion* first_hr);
 
   // Unsets the humongous-related fields on the region.
   void clear_humongous();
@@ -513,6 +513,14 @@
     _next_in_special_set = r;
   }
 
+  void set_allocation_context(AllocationContext_t context) {
+    _allocation_context = context;
+  }
+
+  AllocationContext_t  allocation_context() const {
+    return _allocation_context;
+  }
+
   // Methods used by the HeapRegionSetBase class and subclasses.
 
   // Getter and setter for the next and prev fields used to link regions into
@@ -556,7 +564,8 @@
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 
-  HeapWord* orig_end() const { return _orig_end; }
+  // For the start region of a humongous sequence, its original end().
+  HeapWord* orig_end() const { return _bottom + GrainWords; }
 
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space, bool locked = false);
@@ -603,7 +612,7 @@
   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 
   void reset_during_compaction() {
-    assert(isHumongous() && startsHumongous(),
+    assert(is_starts_humongous(),
            "should only be called for starts humongous regions");
 
     zero_marked_bytes();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionBounds.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
+
+class HeapRegionBounds : public AllStatic {
+private:
+  // Minimum region size; we won't go lower than that.
+  // We might want to decrease this in the future, to deal with small
+  // heaps a bit more efficiently.
+  static const size_t MIN_REGION_SIZE = 1024 * 1024;
+
+  // Maximum region size; we don't go higher than that. There's a good
+  // reason for having an upper bound. We don't want regions to get too
+  // large, otherwise cleanup's effectiveness would decrease as there
+  // will be fewer opportunities to find totally empty regions after
+  // marking.
+  static const size_t MAX_REGION_SIZE = 32 * 1024 * 1024;
+
+  // The automatic region size calculation will try to have around this
+  // many regions in the heap (based on the min heap size).
+  static const size_t TARGET_REGION_NUMBER = 2048;
+
+public:
+  static inline size_t min_size();
+  static inline size_t max_size();
+  static inline size_t target_number();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionBounds.inline.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "gc_implementation/g1/heapRegionBounds.hpp"
+
+size_t HeapRegionBounds::min_size() {
+  return MIN_REGION_SIZE;
+}
+
+size_t HeapRegionBounds::max_size() {
+  return MAX_REGION_SIZE;
+}
+
+size_t HeapRegionBounds::target_number() {
+  return TARGET_REGION_NUMBER;
+}
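Combined with setup_heap_region_size() in heapRegion.cpp above, these bounds determine the ergonomic region size: roughly the average heap size divided by the target region count, rounded down to a power of two and clamped to [1 MB, 32 MB]. A standalone sketch of that rule (the rounding mirrors log2_long(); HotSpot additionally recomputes the log after clamping):

#include <algorithm>
#include <cstddef>

// Self-contained restatement of the sizing rule, using the bounds above.
static size_t g1_region_size(size_t initial_heap, size_t max_heap) {
  const size_t min_sz = 1u * 1024 * 1024;    // HeapRegionBounds::min_size()
  const size_t max_sz = 32u * 1024 * 1024;   // HeapRegionBounds::max_size()
  const size_t target = 2048;                // HeapRegionBounds::target_number()

  size_t average = (initial_heap + max_heap) / 2;
  size_t size    = std::max(average / target, min_sz);

  int log = 0;                               // round down to a power of two
  while ((size >> (log + 1)) != 0) {
    log++;
  }
  size = (size_t)1 << log;

  return std::min(std::max(size, min_sz), max_sz);
}

For example, -Xms2g -Xmx8g gives an average of 5 GB; 5 GB / 2048 is about 2.5 MB, which rounds down to a 2 MB region size, well within the bounds.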
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -66,10 +66,11 @@
 #endif
 
 HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
-  HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrm_index);
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
   MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
   assert(reserved().contains(mr), "invariant");
-  return new HeapRegion(hrm_index, G1CollectedHeap::heap()->bot_shared(), mr);
+  return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
 }
 
 void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
@@ -281,7 +282,7 @@
     // We'll ignore "continues humongous" regions (we'll process them
     // when we come across their corresponding "start humongous"
     // region) and regions already claimed.
-    if (r->claim_value() == claim_value || r->continuesHumongous()) {
+    if (r->claim_value() == claim_value || r->is_continues_humongous()) {
       continue;
     }
     // OK, try to claim it
@@ -289,7 +290,7 @@
       continue;
     }
     // Success!
-    if (r->startsHumongous()) {
+    if (r->is_starts_humongous()) {
       // If the region is "starts humongous" we'll iterate over its
       // "continues humongous" first; in fact we'll do them
       // first. The order is important. In one case, calling the
@@ -301,7 +302,7 @@
       for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
         HeapRegion* chr = _regions.get_by_index(ch_index);
 
-        assert(chr->continuesHumongous(), "Must be humongous region");
+        assert(chr->is_continues_humongous(), "Must be humongous region");
         assert(chr->humongous_start_region() == r,
                err_msg("Must work on humongous continuation of the original start region "
                        PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
@@ -311,7 +312,7 @@
         bool claim_result = chr->claimHeapRegion(claim_value);
         // We should always be able to claim it; no one else should
         // be trying to claim this region.
-        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
+        guarantee(claim_result, "We should always be able to claim the is_continues_humongous part of the humongous object");
 
         bool res2 = blk->doHeapRegion(chr);
         if (res2) {
@@ -322,7 +323,7 @@
         // does something with "continues humongous" regions
         // clears them). We might have to weaken it in the future,
         // but let's leave these two asserts here for extra safety.
-        assert(chr->continuesHumongous(), "should still be the case");
+        assert(chr->is_continues_humongous(), "should still be the case");
         assert(chr->humongous_start_region() == r, "sanity");
       }
     }
@@ -424,7 +425,7 @@
     // this method may be called, we have only completed allocation of the regions,
     // but not put into a region set.
     prev_committed = true;
-    if (hr->startsHumongous()) {
+    if (hr->is_starts_humongous()) {
       prev_end = hr->orig_end();
     } else {
       prev_end = hr->end();
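Routing new_heap_region() through g1h->allocator() gives extended builds a hook to construct their own HeapRegion variant. A hypothetical override (G1AllocatorExt and MyHeapRegion are invented names; the parameter types are inferred from the call site above):

// Hypothetical sketch only: an extended allocator returning a HeapRegion
// subclass instead of the plain HeapRegion the removed code constructed.
HeapRegion* G1AllocatorExt::new_heap_region(uint hrm_index,
                                            G1BlockOffsetSharedArray* sharedOffsetArray,
                                            MemRegion mr) {
  return new MyHeapRegion(hrm_index, sharedOffsetArray, mr);
}

The default allocator would simply return new HeapRegion(hrm_index, sharedOffsetArray, mr), matching the line this hunk removes.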
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -419,7 +419,7 @@
   FromCardCache::print();
 }
 
-void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
+void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
   uint cur_hrm_ind = hr()->hrm_index();
 
   if (G1TraceHeapRegionRememberedSet) {
@@ -435,10 +435,10 @@
   if (G1TraceHeapRegionRememberedSet) {
     gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
                   hr()->bottom(), from_card,
-                  FromCardCache::at((uint)tid, cur_hrm_ind));
+                  FromCardCache::at(tid, cur_hrm_ind));
   }
 
-  if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) {
+  if (FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
     if (G1TraceHeapRegionRememberedSet) {
       gclog_or_tty->print_cr("  from-card cache hit.");
     }
@@ -493,7 +493,7 @@
         return;
       } else {
         if (G1TraceHeapRegionRememberedSet) {
-          gclog_or_tty->print_cr("   [tid %d] sparse table entry "
+          gclog_or_tty->print_cr("   [tid %u] sparse table entry "
                         "overflow(f: %d, t: %u)",
                         tid, from_hrm_ind, cur_hrm_ind);
         }
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -179,7 +179,7 @@
 
   // For now.  Could "expand" some tables in the future, so that this made
   // sense.
-  void add_reference(OopOrNarrowOopStar from, int tid);
+  void add_reference(OopOrNarrowOopStar from, uint tid);
 
   // Removes any entries shown by the given bitmaps to contain only dead
   // objects.
@@ -301,7 +301,7 @@
   }
 
   // Used in the parallel case.
-  void add_reference(OopOrNarrowOopStar from, int tid) {
+  void add_reference(OopOrNarrowOopStar from, uint tid) {
     _other_regions.add_reference(from, tid);
   }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -41,7 +41,7 @@
 void HeapRegionSetBase::verify_region(HeapRegion* hr) {
   assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrm_index()));
   assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrm_index())); // currently we don't use these sets for young regions
-  assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
+  assert(hr->is_humongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
   assert(hr->is_free() == regions_free(), err_msg("Wrong free state for region %u and set %s", hr->hrm_index(), name()));
   assert(!hr->is_free() || hr->is_empty(), err_msg("Free region %u is not empty for set %s", hr->hrm_index(), name()));
   assert(!hr->is_empty() || hr->is_free(), err_msg("Empty region %u is not free for set %s", hr->hrm_index(), name()));
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionType.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionType.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -30,8 +30,8 @@
     case FreeTag:
     case EdenTag:
     case SurvTag:
-    case HumStartsTag:
-    case HumContTag:
+    case StartsHumongousTag:
+    case ContinuesHumongousTag:
     case OldTag:
       return true;
   }
@@ -41,12 +41,12 @@
 const char* HeapRegionType::get_str() const {
   hrt_assert_is_valid(_tag);
   switch (_tag) {
-    case FreeTag:      return "FREE";
-    case EdenTag:      return "EDEN";
-    case SurvTag:      return "SURV";
-    case HumStartsTag: return "HUMS";
-    case HumContTag:   return "HUMC";
-    case OldTag:       return "OLD";
+    case FreeTag:               return "FREE";
+    case EdenTag:               return "EDEN";
+    case SurvTag:               return "SURV";
+    case StartsHumongousTag:    return "HUMS";
+    case ContinuesHumongousTag: return "HUMC";
+    case OldTag:                return "OLD";
   }
   ShouldNotReachHere();
   // keep some compilers happy
@@ -56,12 +56,12 @@
 const char* HeapRegionType::get_short_str() const {
   hrt_assert_is_valid(_tag);
   switch (_tag) {
-    case FreeTag:      return "F";
-    case EdenTag:      return "E";
-    case SurvTag:      return "S";
-    case HumStartsTag: return "HS";
-    case HumContTag:   return "HC";
-    case OldTag:       return "O";
+    case FreeTag:               return "F";
+    case EdenTag:               return "E";
+    case SurvTag:               return "S";
+    case StartsHumongousTag:    return "HS";
+    case ContinuesHumongousTag: return "HC";
+    case OldTag:                return "O";
   }
   ShouldNotReachHere();
   // keep some compilers happy
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionType.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionType.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -49,22 +49,22 @@
   // 0001 1 [ 3] Survivor
   //
   // 0010 0      Humongous Mask
-  // 0010 0 [ 4] Humongous Starts
-  // 0010 1 [ 5] Humongous Continues
+  // 0010 0 [ 4] Starts Humongous
+  // 0010 1 [ 5] Continues Humongous
   //
   // 01000 [ 8] Old
   typedef enum {
-    FreeTag       = 0,
+    FreeTag               = 0,
 
-    YoungMask     = 2,
-    EdenTag       = YoungMask,
-    SurvTag       = YoungMask + 1,
+    YoungMask             = 2,
+    EdenTag               = YoungMask,
+    SurvTag               = YoungMask + 1,
 
-    HumMask       = 4,
-    HumStartsTag  = HumMask,
-    HumContTag    = HumMask + 1,
+    HumongousMask         = 4,
+    StartsHumongousTag    = HumongousMask,
+    ContinuesHumongousTag = HumongousMask + 1,
 
-    OldTag        = 8
+    OldTag                = 8
   } Tag;
 
   volatile Tag _tag;
@@ -104,9 +104,9 @@
   bool is_eden()     const { return get() == EdenTag;  }
   bool is_survivor() const { return get() == SurvTag;  }
 
-  bool is_humongous()           const { return (get() & HumMask) != 0; }
-  bool is_starts_humongous()    const { return get() == HumStartsTag;  }
-  bool is_continues_humongous() const { return get() == HumContTag;    }
+  bool is_humongous()           const { return (get() & HumongousMask) != 0;   }
+  bool is_starts_humongous()    const { return get() == StartsHumongousTag;    }
+  bool is_continues_humongous() const { return get() == ContinuesHumongousTag; }
 
   bool is_old() const { return get() == OldTag; }
 
@@ -118,8 +118,8 @@
   void set_eden_pre_gc() { set_from(EdenTag, SurvTag); }
   void set_survivor()    { set_from(SurvTag, FreeTag); }
 
-  void set_starts_humongous()    { set_from(HumStartsTag, FreeTag); }
-  void set_continues_humongous() { set_from(HumContTag,   FreeTag); }
+  void set_starts_humongous()    { set_from(StartsHumongousTag,    FreeTag); }
+  void set_continues_humongous() { set_from(ContinuesHumongousTag, FreeTag); }
 
   void set_old() { set(OldTag); }
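The renamed tags keep the original bit encoding, so the single mask test in is_humongous() above still distinguishes both humongous kinds from everything else. A quick standalone check of that arithmetic:

// Standalone check of the tag encoding: both humongous tags carry the
// HumongousMask bit, and no other tag does.
enum Tag {
  FreeTag               = 0,
  YoungMask             = 2,
  EdenTag               = YoungMask,
  SurvTag               = YoungMask + 1,
  HumongousMask         = 4,
  StartsHumongousTag    = HumongousMask,
  ContinuesHumongousTag = HumongousMask + 1,
  OldTag                = 8
};

static_assert((StartsHumongousTag    & HumongousMask) != 0, "starts humongous is humongous");
static_assert((ContinuesHumongousTag & HumongousMask) != 0, "continues humongous is humongous");
static_assert((EdenTag & HumongousMask) == 0 && (SurvTag & HumongousMask) == 0 &&
              (OldTag  & HumongousMask) == 0, "other tags are not humongous");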
 
--- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -45,11 +45,13 @@
   nonstatic_field(HeapRegionManager, _regions,          G1HeapRegionTable)    \
   nonstatic_field(HeapRegionManager, _num_committed,    uint)                 \
                                                                               \
+  nonstatic_field(G1Allocator,     _summary_bytes_used, size_t)               \
+                                                                              \
   nonstatic_field(G1CollectedHeap, _hrm,                HeapRegionManager)    \
-  nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
   nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
   nonstatic_field(G1CollectedHeap, _old_set,            HeapRegionSetBase)    \
   nonstatic_field(G1CollectedHeap, _humongous_set,      HeapRegionSetBase)    \
+  nonstatic_field(G1CollectedHeap, _allocator,          G1Allocator*)         \
                                                                               \
   nonstatic_field(G1MonitoringSupport, _eden_committed,     size_t)           \
   nonstatic_field(G1MonitoringSupport, _eden_used,          size_t)           \
@@ -72,14 +74,16 @@
                                                                               \
   declare_type(G1OffsetTableContigSpace, CompactibleSpace)                    \
   declare_type(HeapRegion, G1OffsetTableContigSpace)                          \
-  declare_toplevel_type(HeapRegionManager)                                        \
+  declare_toplevel_type(HeapRegionManager)                                    \
   declare_toplevel_type(HeapRegionSetBase)                                    \
   declare_toplevel_type(HeapRegionSetCount)                                   \
   declare_toplevel_type(G1MonitoringSupport)                                  \
+  declare_toplevel_type(G1Allocator)                                          \
                                                                               \
   declare_toplevel_type(G1CollectedHeap*)                                     \
   declare_toplevel_type(HeapRegion*)                                          \
   declare_toplevel_type(G1MonitoringSupport*)                                 \
+  declare_toplevel_type(G1Allocator*)                                         \
 
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -45,7 +45,8 @@
 void VM_G1CollectForAllocation::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
-  _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
+
+  _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
   assert(_result == NULL || _pause_succeeded,
          "if we get back a result, the pause should have succeeded");
 }
@@ -99,7 +100,7 @@
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
-    _result = g1h->attempt_allocation_at_safepoint(_word_size,
+    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                      false /* expect_null_cur_alloc_region */);
     if (_result != NULL) {
       // If we can successfully allocate before we actually do the
@@ -152,7 +153,7 @@
     g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
   if (_pause_succeeded && _word_size > 0) {
     // An allocation had been requested.
-    _result = g1h->attempt_allocation_at_safepoint(_word_size,
+    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                       true /* expect_null_cur_alloc_region */);
   } else {
     assert(_result == NULL, "invariant");
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP
 
+#include "gc_implementation/g1/g1AllocationContext.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 
 // VM_operations for the G1 collector.
@@ -40,6 +41,7 @@
   size_t    _word_size;
   HeapWord* _result;
   bool      _pause_succeeded;
+  AllocationContext_t _allocation_context;
 
 public:
   VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
@@ -49,6 +51,8 @@
       _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
   HeapWord* result() { return _result; }
   bool pause_succeeded() { return _pause_succeeded; }
+  void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
+  AllocationContext_t  allocation_context() { return _allocation_context; }
 };
 
 class VM_G1CollectFull: public VM_GC_Operation {
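With the context stored on VM_G1OperationWithAllocRequest, the requesting thread tags the operation before handing it to the VM thread, and the safepoint paths above read it back via allocation_context(). A hypothetical caller-side sketch; the VM_G1CollectForAllocation constructor arguments and AllocationContext::current() are assumptions, not taken from this changeset:

// Hypothetical usage sketch of the new setter; names noted above as
// assumptions are for illustration only.
HeapWord* collect_for_allocation(size_t word_size, unsigned int gc_count_before) {
  VM_G1CollectForAllocation op(gc_count_before, word_size);
  op.set_allocation_context(AllocationContext::current());  // tag with caller's context
  VMThread::execute(&op);                                    // doit() runs at a safepoint
  return op.pause_succeeded() ? op.result() : NULL;
}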
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -288,7 +288,7 @@
           while (p < to) {
             Prefetch::write(p, interval);
             oop m = oop(p);
-            assert(m->is_oop_or_null(), "check for header");
+            assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
             m->push_contents(pm);
             p += m->size();
           }
@@ -296,7 +296,7 @@
         } else {
           while (p < to) {
             oop m = oop(p);
-            assert(m->is_oop_or_null(), "check for header");
+            assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
             m->push_contents(pm);
             p += m->size();
           }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -74,10 +74,9 @@
     return JNI_ENOMEM;
   }
 
-  _reserved = MemRegion((HeapWord*)heap_rs.base(),
-                        (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
+  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region(), 3);
   barrier_set->initialize();
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -2882,7 +2882,7 @@
         start_array->allocate_block(addr);
       }
       oop(addr)->update_contents(cm);
-      assert(oop(addr)->is_oop_or_null(), "should be an oop now");
+      assert(oop(addr)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(addr))));
     }
   }
 }
@@ -3366,7 +3366,7 @@
 
   oop moved_oop = (oop) destination();
   moved_oop->update_contents(compaction_manager());
-  assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
+  assert(moved_oop->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop)));
 
   update_state(words);
   assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -582,6 +582,14 @@
   }
 }
 
+void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
+  // It is important to do this in a way such that concurrent readers can't
+  // temporarily think something is in the heap.  (Seen this happen in asserts.)
+  _reserved.set_word_size(0);
+  _reserved.set_start(start);
+  _reserved.set_end(end);
+}
+
 /////////////// Unit tests ///////////////
 
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -85,6 +85,7 @@
   friend class VMStructs;
   friend class IsGCActiveMark; // Block structured external access to _is_gc_active
 
+ private:
 #ifdef ASSERT
   static int       _fire_out_of_memory_count;
 #endif
@@ -97,8 +98,9 @@
   // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
   bool _defer_initial_card_mark;
 
+  MemRegion _reserved;
+
  protected:
-  MemRegion _reserved;
   BarrierSet* _barrier_set;
   bool _is_gc_active;
   uint _n_par_threads;
@@ -211,6 +213,7 @@
   // Stop any ongoing concurrent work and prepare for exit.
   virtual void stop() {}
 
+  void initialize_reserved_region(HeapWord *start, HeapWord *end);
   MemRegion reserved_region() const { return _reserved; }
   address base() const { return (address)reserved_region().start(); }
 
@@ -637,6 +640,15 @@
   // actual number may be germane.
   static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
 
+  // Copy the current allocation context statistics for the specified contexts.
+  // For each context in contexts, set the corresponding entries in the totals
+  // and accuracy arrays to the current values held by the statistics.  Each
+  // array should be of length len.
+  virtual void copy_allocation_context_stats(const jint* contexts,
+                                             jlong* totals,
+                                             jbyte* accuracy,
+                                             jint len) { }
+
   /////////////// Unit tests ///////////////
 
   NOT_PRODUCT(static void test_is_in();)
--- a/hotspot/src/share/vm/gc_interface/gcCause.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_interface/gcCause.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -54,6 +54,9 @@
     case _wb_young_gc:
       return "WhiteBox Initiated Young GC";
 
+    case _update_allocation_context_stats:
+      return "Update Allocation Context Stats";
+
     case _no_gc:
       return "No GC";
 
--- a/hotspot/src/share/vm/gc_interface/gcCause.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/gc_interface/gcCause.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -47,6 +47,7 @@
     _heap_inspection,
     _heap_dump,
     _wb_young_gc,
+    _update_allocation_context_stats,
 
     /* implementation independent, but reserved for GC use */
     _no_gc,
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -35,7 +35,7 @@
 #ifdef ASSERT
 #define VERIFY_OOP(o_) \
       if (VerifyOops) { \
-        assert((oop(o_))->is_oop_or_null(), "Not an oop!"); \
+        assert((oop(o_))->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(o_)))); \
         StubRoutines::_verify_oop_count++;  \
       }
 #else
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -1077,7 +1077,7 @@
 address SignatureHandlerLibrary::set_handler_blob() {
   BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
   if (handler_blob == NULL) {
-    CompileBroker::handle_full_code_cache();
+    CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
     return NULL;
   }
   address handler = handler_blob->code_begin();
--- a/hotspot/src/share/vm/memory/allocation.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -693,14 +693,16 @@
 // compilers and they should be upwards compatible with C++11/14. Therefore
 // PLEASE BE CAREFUL if you change the signature of the following operators!
 
+static void * zero = (void *) 0;
+
 void* operator new(size_t size) /* throw(std::bad_alloc) */ {
   fatal("Should not call global operator new");
-  return 0;
+  return zero;
 }
 
 void* operator new [](size_t size) /* throw(std::bad_alloc) */ {
   fatal("Should not call global operator new[]");
-  return 0;
+  return zero;
 }
 
 void* operator new(size_t size, const std::nothrow_t&  nothrow_constant) throw() {
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -123,17 +123,9 @@
     return JNI_ENOMEM;
   }
 
-  _reserved = MemRegion((HeapWord*)heap_rs.base(),
-                        (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  // It is important to do this in a way such that concurrent readers can't
-  // temporarily think something is in the heap.  (Seen this happen in asserts.)
-  _reserved.set_word_size(0);
-  _reserved.set_start((HeapWord*)heap_rs.base());
-  size_t actual_heap_size = heap_rs.size();
-  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
-
-  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
+  _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
   set_barrier_set(rem_set()->bs());
 
   _gch = this;
--- a/hotspot/src/share/vm/memory/heap.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/memory/heap.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -35,7 +35,9 @@
 
 // Implementation of Heap
 
-CodeHeap::CodeHeap() {
+CodeHeap::CodeHeap(const char* name, const int code_blob_type)
+  : _code_blob_type(code_blob_type) {
+  _name                         = name;
   _number_of_committed_segments = 0;
   _number_of_reserved_segments  = 0;
   _segment_size                 = 0;
@@ -44,6 +46,8 @@
   _freelist                     = NULL;
   _freelist_segments            = 0;
   _freelist_length              = 0;
+  _max_allocated_capacity       = 0;
+  _was_full                     = false;
 }
 
 
@@ -88,9 +92,8 @@
 }
 
 
-bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
-                       size_t segment_size) {
-  assert(reserved_size >= committed_size, "reserved < committed");
+bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
+  assert(rs.size() >= committed_size, "reserved < committed");
   assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
   assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
 
@@ -102,18 +105,13 @@
   if (os::can_execute_large_page_memory()) {
     const size_t min_pages = 8;
     page_size = MIN2(os::page_size_for_region(committed_size, min_pages),
-                     os::page_size_for_region(reserved_size, min_pages));
+                     os::page_size_for_region(rs.size(), min_pages));
   }
 
   const size_t granularity = os::vm_allocation_granularity();
-  const size_t r_align = MAX2(page_size, granularity);
-  const size_t r_size = align_size_up(reserved_size, r_align);
   const size_t c_size = align_size_up(committed_size, page_size);
 
-  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
-    MAX2(page_size, granularity);
-  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
-  os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
+  os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
                        rs.base(), rs.size());
   if (!_memory.initialize(rs, c_size)) {
     return false;
@@ -186,6 +184,7 @@
     assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
     assert(!block->free(), "must be marked free");
     DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
+    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
     return block->allocated_space();
   }
 
@@ -207,6 +206,7 @@
     b->initialize(number_of_segments);
     _next_segment += number_of_segments;
     DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
+    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
     return b->allocated_space();
   } else {
     return NULL;
--- a/hotspot/src/share/vm/memory/heap.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/memory/heap.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_MEMORY_HEAP_HPP
 #define SHARE_VM_MEMORY_HEAP_HPP
 
+#include "code/codeBlob.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/virtualspace.hpp"
 
@@ -93,6 +94,11 @@
   FreeBlock*   _freelist;
   size_t       _freelist_segments;               // No. of segments in freelist
   int          _freelist_length;
+  size_t       _max_allocated_capacity;          // Peak capacity that was allocated during lifetime of the heap
+
+  const char*  _name;                            // Name of the CodeHeap
+  const int    _code_blob_type;                  // CodeBlobType it contains
+  bool         _was_full;                        // True if the code heap was full
 
   enum { free_sentinel = 0xFF };
 
@@ -127,10 +133,10 @@
   void clear();                                 // clears all heap contents
 
  public:
-  CodeHeap();
+  CodeHeap(const char* name, const int code_blob_type);
 
   // Heap extents
-  bool  reserve(size_t reserved_size, size_t committed_size, size_t segment_size);
+  bool  reserve(ReservedSpace rs, size_t committed_size, size_t segment_size);
   bool  expand_by(size_t size);                  // expands committed memory by size
 
   // Memory allocation
@@ -161,8 +167,18 @@
   size_t max_capacity() const;
   int    allocated_segments() const;
   size_t allocated_capacity() const;
+  size_t max_allocated_capacity() const          { return _max_allocated_capacity; }
   size_t unallocated_capacity() const            { return max_capacity() - allocated_capacity(); }
 
+  // Returns true if the CodeHeap contains CodeBlobs of the given type
+  bool accepts(int code_blob_type) const         { return (_code_blob_type == code_blob_type); }
+  int code_blob_type() const                     { return _code_blob_type; }
+
+  // Debugging / Profiling
+  const char* name() const                       { return _name; }
+  bool was_full()                                { return _was_full; }
+  void report_full()                             { _was_full = true; }
+
 private:
   size_t heap_unallocated_capacity() const;
 
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -473,7 +473,7 @@
   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
   oop discovered = java_lang_ref_Reference::discovered(_ref);
   assert(_discovered_addr && discovered->is_oop_or_null(),
-         "discovered field is bad");
+         err_msg("Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)));
   _next = discovered;
   _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
   _referent = java_lang_ref_Reference::referent(_ref);
@@ -482,7 +482,9 @@
   assert(allow_null_referent ?
              _referent->is_oop_or_null()
            : _referent->is_oop(),
-         "bad referent");
+         err_msg("Expected an oop%s for referent field at " PTR_FORMAT,
+                 (allow_null_referent ? " or NULL" : ""),
+                 p2i(_referent)));
 }
 
 void DiscoveredListIterator::remove() {
@@ -630,7 +632,7 @@
     oop next = java_lang_ref_Reference::next(iter.obj());
     if ((iter.referent() == NULL || iter.is_referent_alive() ||
          next != NULL)) {
-      assert(next->is_oop_or_null(), "bad next field");
+      assert(next->is_oop_or_null(), err_msg("Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)));
       // Remove Reference object from list
       iter.remove();
       // Trace the cohorts
@@ -979,7 +981,7 @@
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop next = java_lang_ref_Reference::next(iter.obj());
-    assert(next->is_oop_or_null(), "bad next field");
+    assert(next->is_oop_or_null(), err_msg("Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)));
     // If referent has been cleared or Reference is not active,
     // drop it.
     if (iter.referent() == NULL || next != NULL) {
@@ -1172,7 +1174,7 @@
 
   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
   const oop  discovered = java_lang_ref_Reference::discovered(obj);
-  assert(discovered->is_oop_or_null(), "bad discovered field");
+  assert(discovered->is_oop_or_null(), err_msg("Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)));
   if (discovered != NULL) {
     // The reference has already been discovered...
     if (TraceReferenceGC) {
--- a/hotspot/src/share/vm/memory/space.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/memory/space.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -331,11 +331,10 @@
   CompactibleSpace* space;
   HeapWord* threshold;
 
-  CompactPoint(Generation* _gen) :
-    gen(_gen), space(NULL), threshold(0) {}
+  CompactPoint(Generation* g = NULL) :
+    gen(g), space(NULL), threshold(0) {}
 };
 
-
 // A space that supports compaction operations.  This is usually, but not
 // necessarily, a space that is normally contiguous.  But, for example, a
 // free-list-based space whose normal collection is a mark-sweep without
--- a/hotspot/src/share/vm/memory/universe.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/memory/universe.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -127,6 +127,8 @@
 oop Universe::_arithmetic_exception_instance          = NULL;
 oop Universe::_virtual_machine_error_instance         = NULL;
 oop Universe::_vm_exception                           = NULL;
+oop Universe::_allocation_context_notification_obj    = NULL;
+
 Method* Universe::_throw_illegal_access_error         = NULL;
 Array<int>* Universe::_the_empty_int_array            = NULL;
 Array<u2>* Universe::_the_empty_short_array           = NULL;
@@ -196,6 +198,7 @@
   f->do_oop((oop*)&_main_thread_group);
   f->do_oop((oop*)&_system_thread_group);
   f->do_oop((oop*)&_vm_exception);
+  f->do_oop((oop*)&_allocation_context_notification_obj);
   debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
 }
 
--- a/hotspot/src/share/vm/memory/universe.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/memory/universe.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -178,6 +178,8 @@
   // the vm thread.
   static oop          _vm_exception;
 
+  static oop          _allocation_context_notification_obj;
+
   // The particular choice of collected heap.
   static CollectedHeap* _collectedHeap;
 
@@ -307,6 +309,10 @@
   static oop          arithmetic_exception_instance() { return _arithmetic_exception_instance; }
   static oop          virtual_machine_error_instance() { return _virtual_machine_error_instance; }
   static oop          vm_exception()                  { return _vm_exception; }
+
+  static inline oop   allocation_context_notification_obj();
+  static inline void  set_allocation_context_notification_obj(oop obj);
+
   static Method*      throw_illegal_access_error()    { return _throw_illegal_access_error; }
 
   static Array<int>*       the_empty_int_array()    { return _the_empty_int_array; }
--- a/hotspot/src/share/vm/memory/universe.inline.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/memory/universe.inline.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -41,4 +41,12 @@
   return type == T_DOUBLE || type == T_LONG;
 }
 
+inline oop Universe::allocation_context_notification_obj() {
+  return _allocation_context_notification_obj;
+}
+
+inline void Universe::set_allocation_context_notification_obj(oop obj) {
+  _allocation_context_notification_obj = obj;
+}
+
 #endif // SHARE_VM_MEMORY_UNIVERSE_INLINE_HPP
--- a/hotspot/src/share/vm/oops/method.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/oops/method.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -93,7 +93,7 @@
   set_hidden(false);
   set_dont_inline(false);
   set_method_data(NULL);
-  set_method_counters(NULL);
+  clear_method_counters();
   set_vtable_index(Method::garbage_vtable_index);
 
   // Fix and bury in Method*
@@ -117,7 +117,7 @@
   MetadataFactory::free_metadata(loader_data, method_data());
   set_method_data(NULL);
   MetadataFactory::free_metadata(loader_data, method_counters());
-  set_method_counters(NULL);
+  clear_method_counters();
   // The nmethod will be gone when we get here.
   if (code() != NULL) _code = NULL;
 }
@@ -395,9 +395,7 @@
   methodHandle mh(m);
   ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
   MethodCounters* counters = MethodCounters::allocate(loader_data, CHECK_NULL);
-  if (mh->method_counters() == NULL) {
-    mh->set_method_counters(counters);
-  } else {
+  if (!mh->init_method_counters(counters)) {
     MetadataFactory::free_metadata(loader_data, counters);
   }
   return mh->method_counters();
@@ -859,7 +857,7 @@
   assert(!DumpSharedSpaces || _method_data == NULL, "unexpected method data?");
 
   set_method_data(NULL);
-  set_method_counters(NULL);
+  clear_method_counters();
 }
 
 // Called when the method_holder is getting linked. Setup entrypoints so the method
--- a/hotspot/src/share/vm/oops/method.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/oops/method.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -333,11 +333,13 @@
     return _method_counters;
   }
 
-  void set_method_counters(MethodCounters* counters) {
-    // The store into method must be released. On platforms without
-    // total store order (TSO) the reference may become visible before
-    // the initialization of data otherwise.
-    OrderAccess::release_store_ptr((volatile void *)&_method_counters, counters);
+  void clear_method_counters() {
+    _method_counters = NULL;
+  }
+
+  bool init_method_counters(MethodCounters* counters) {
+    // Try to install a pointer to MethodCounters, return true on success.
+    return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
   }
 
 #ifdef TIERED
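The method.hpp hunk above replaces the release-store setter with clear_method_counters() plus a compare-and-swap based init_method_counters(), so that when two threads race to allocate a MethodCounters (see the allocation hunk in method.cpp above), exactly one pointer gets installed and the loser frees its allocation. Below is a minimal standalone sketch of that install-or-discard idiom; it uses std::atomic as a stand-in for HotSpot's Atomic::cmpxchg_ptr, and Counters/get_or_install are illustrative names.

#include <atomic>
#include <cstdio>

struct Counters { int invocations; };

static std::atomic<Counters*> g_slot(nullptr);

// Try to CAS a freshly allocated object into a NULL slot; if another thread
// won the race, delete our copy and return the winner's pointer instead.
Counters* get_or_install() {
  Counters* existing = g_slot.load(std::memory_order_acquire);
  if (existing != nullptr) return existing;

  Counters* fresh = new Counters();
  fresh->invocations = 0;
  Counters* expected = nullptr;
  if (g_slot.compare_exchange_strong(expected, fresh,
                                     std::memory_order_acq_rel)) {
    return fresh;    // we installed our copy
  }
  delete fresh;      // lost the race; discard our copy
  return expected;   // compare_exchange_strong wrote the winner into 'expected'
}

int main() {
  std::printf("counters at %p\n", (void*)get_or_install());
  return 0;
}

The key property, matching the boolean return of init_method_counters(), is that the CAS tells the caller whether its own allocation was installed, so only the losing thread frees.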
--- a/hotspot/src/share/vm/opto/c2compiler.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/opto/c2compiler.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -24,7 +24,9 @@
 
 #include "precompiled.hpp"
 #include "opto/c2compiler.hpp"
+#include "opto/compile.hpp"
 #include "opto/optoreg.hpp"
+#include "opto/output.hpp"
 #include "opto/runtime.hpp"
 
 // register information defined by ADLC
@@ -147,3 +149,8 @@
 void C2Compiler::print_timers() {
   // do nothing
 }
+
+int C2Compiler::initial_code_buffer_size() {
+  assert(SegmentedCodeCache, "Should be only used with a segmented code cache");
+  return Compile::MAX_inst_size + Compile::MAX_locs_size + initial_const_capacity;
+}
--- a/hotspot/src/share/vm/opto/c2compiler.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/opto/c2compiler.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -50,6 +50,9 @@
 
   // Print compilation timers and statistics
   void print_timers();
+
+  // Initial size of the code buffer (may be increased at runtime)
+  static int initial_code_buffer_size();
 };
 
 #endif // SHARE_VM_OPTO_C2COMPILER_HPP
--- a/hotspot/src/share/vm/opto/compile.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -535,7 +535,7 @@
     if (scratch_buffer_blob() == NULL) {
       // Let CompileBroker disable further compilations.
       record_failure("Not enough space for scratch buffer in CodeCache");
-      CompileBroker::handle_full_code_cache();
+      CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
       return;
     }
   }
--- a/hotspot/src/share/vm/opto/output.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/opto/output.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -1166,7 +1166,7 @@
   // Have we run out of code space?
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     C->record_failure("CodeCache is full");
-    CompileBroker::handle_full_code_cache();
+    CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
     return NULL;
   }
   // Configure the code buffer.
@@ -1491,7 +1491,7 @@
       cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
       if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
         C->record_failure("CodeCache is full");
-        CompileBroker::handle_full_code_cache();
+        CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
         return;
       }
 
@@ -1648,7 +1648,7 @@
   // One last check for failed CodeBuffer::expand:
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     C->record_failure("CodeCache is full");
-    CompileBroker::handle_full_code_cache();
+    CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
     return;
   }
 
--- a/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -228,19 +228,17 @@
   // created nmethod will notify normally and nmethods which are freed
   // can be safely skipped.
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  nmethod* current = CodeCache::first_nmethod();
-  while (current != NULL) {
-    // Only notify for live nmethods
-    if (current->is_alive()) {
-      // Lock the nmethod so it can't be freed
-      nmethodLocker nml(current);
+  // Iterate over non-profiled and profiled nmethods
+  NMethodIterator iter;
+  while(iter.next_alive()) {
+    nmethod* current = iter.method();
+    // Lock the nmethod so it can't be freed
+    nmethodLocker nml(current);
 
-      // Don't hold the lock over the notify or jmethodID creation
-      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      current->get_and_cache_jmethod_id();
-      JvmtiExport::post_compiled_method_load(current);
-    }
-    current = CodeCache::next_nmethod(current);
+    // Don't hold the lock over the notify or jmethodID creation
+    MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    current->get_and_cache_jmethod_id();
+    JvmtiExport::post_compiled_method_load(current);
   }
   return JVMTI_ERROR_NONE;
 }
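The JVMTI change above drops the first_nmethod()/next_nmethod() walk over a single code heap in favor of an NMethodIterator whose next_alive() yields only live nmethods across all code heaps. Below is a standalone sketch of that iteration shape; ToyMethod and ToyIterator are illustrative stand-ins, not HotSpot types.

#include <cstddef>
#include <vector>

struct ToyMethod { bool alive; };

// Walks every "heap" in turn and surfaces only entries that are still alive,
// so callers no longer chase a per-heap next pointer themselves.
class ToyIterator {
  const std::vector<std::vector<ToyMethod> >* _heaps;
  std::size_t _heap;
  std::size_t _index;
  bool _started;
public:
  explicit ToyIterator(const std::vector<std::vector<ToyMethod> >& heaps)
    : _heaps(&heaps), _heap(0), _index(0), _started(false) {}

  // Advance to the next alive entry; return false once all heaps are exhausted.
  bool next_alive() {
    if (_started) { ++_index; } else { _started = true; }
    while (_heap < _heaps->size()) {
      const std::vector<ToyMethod>& h = (*_heaps)[_heap];
      while (_index < h.size()) {
        if (h[_index].alive) return true;
        ++_index;
      }
      ++_heap;
      _index = 0;
    }
    return false;
  }

  const ToyMethod* method() const { return &(*_heaps)[_heap][_index]; }
};

int main() {
  std::vector<std::vector<ToyMethod> > heaps;
  heaps.push_back(std::vector<ToyMethod>());   // "non-profiled" heap
  heaps.push_back(std::vector<ToyMethod>());   // "profiled" heap
  ToyMethod live; live.alive = true;
  ToyMethod dead; dead.alive = false;
  heaps[0].push_back(live); heaps[0].push_back(dead);
  heaps[1].push_back(dead); heaps[1].push_back(live);

  int alive = 0;
  ToyIterator iter(heaps);
  while (iter.next_alive()) { ++alive; }
  return (alive == 2) ? 0 : 1;   // exit code 0: both live entries were visited
}

The same while (iter.next_alive()) loop shape is what both the JVMTI code above and the sweeper changes further below switch to.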
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -257,7 +257,7 @@
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
   oop result = JNIHandles::resolve(obj);
   const HeapRegion* hr = g1->heap_region_containing(result);
-  return hr->isHumongous();
+  return hr->is_humongous();
 WB_END
 
 WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
@@ -713,6 +713,12 @@
 WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o))
   Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(true);
   Universe::heap()->collect(GCCause::_last_ditch_collection);
+#if INCLUDE_ALL_GCS
+  if (UseG1GC) {
+    // Needs to be cleared explicitly for G1
+    Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(false);
+  }
+#endif // INCLUDE_ALL_GCS
 WB_END
 
 WB_ENTRY(void, WB_YoungGC(JNIEnv* env, jobject o))
@@ -864,6 +870,36 @@
   return ret;
 }
 
+void WhiteBox::register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread, JNINativeMethod* method_array, int method_count) {
+  ResourceMark rm;
+  ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+
+  // Register natives one by one so that exceptions can be caught for each method
+  jclass no_such_method_error_klass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string());
+  CHECK_JNI_EXCEPTION(env);
+  for (int i = 0, n = method_count; i < n; ++i) {
+    // Skip dummy entries
+    if (method_array[i].fnPtr == NULL) continue;
+    if (env->RegisterNatives(wbclass, &method_array[i], 1) != 0) {
+      jthrowable throwable_obj = env->ExceptionOccurred();
+      if (throwable_obj != NULL) {
+        env->ExceptionClear();
+        if (env->IsInstanceOf(throwable_obj, no_such_method_error_klass)) {
+          // NoSuchMethodError is thrown when a method can't be found or a method is not native.
+          // Ignoring the exception since it does not prevent use of other WhiteBox methods.
+          tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s",
+              method_array[i].name, method_array[i].signature);
+        }
+      } else {
+        // Registration failed unexpectedly.
+        tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. All methods will be unregistered",
+            method_array[i].name, method_array[i].signature);
+        env->UnregisterNatives(wbclass);
+        break;
+      }
+    }
+  }
+}
 
 #define CC (char*)
 
@@ -971,35 +1007,9 @@
       instanceKlassHandle ikh = instanceKlassHandle(JNIHandles::resolve(wbclass)->klass());
       Handle loader(ikh->class_loader());
       if (loader.is_null()) {
-        ResourceMark rm;
-        ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
-        bool result = true;
-        //  one by one registration natives for exception catching
-        jclass exceptionKlass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string());
-        CHECK_JNI_EXCEPTION(env);
-        for (int i = 0, n = sizeof(methods) / sizeof(methods[0]); i < n; ++i) {
-          if (env->RegisterNatives(wbclass, methods + i, 1) != 0) {
-            result = false;
-            jthrowable throwable_obj = env->ExceptionOccurred();
-            if (throwable_obj != NULL) {
-              env->ExceptionClear();
-              if (env->IsInstanceOf(throwable_obj, exceptionKlass)) {
-                // j.l.NoSuchMethodError is thrown when a method can't be found or a method is not native
-                // ignoring the exception
-                tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s", methods[i].name, methods[i].signature);
-              }
-            } else {
-              // register is failed w/o exception or w/ unexpected exception
-              tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. All methods will be unregistered", methods[i].name, methods[i].signature);
-              env->UnregisterNatives(wbclass);
-              break;
-            }
-          }
-        }
-
-        if (result) {
-          WhiteBox::set_used();
-        }
+        WhiteBox::register_methods(env, wbclass, thread, methods, sizeof(methods) / sizeof(methods[0]));
+        WhiteBox::register_extended(env, wbclass, thread);
+        WhiteBox::set_used();
       }
     }
   }
--- a/hotspot/src/share/vm/prims/whitebox.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/prims/whitebox.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -29,6 +29,8 @@
 
 #include "memory/allocation.hpp"
 #include "oops/oopsHierarchy.hpp"
+#include "oops/symbol.hpp"
+#include "runtime/interfaceSupport.hpp"
 
 // Entry macro to transition from JNI to VM state.
 
@@ -64,6 +66,9 @@
   static bool lookup_bool(const char* field_name, oop object);
 
   static int array_bytes_to_length(size_t bytes);
+  static void register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread,
+    JNINativeMethod* method_array, int method_count);
+  static void register_extended(JNIEnv* env, jclass wbclass, JavaThread* thread);
 };
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/prims/whitebox_ext.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "prims/whitebox.hpp"
+
+void WhiteBox::register_extended(JNIEnv* env, jclass wbclass, JavaThread* thread) { }
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -215,7 +215,7 @@
   // The main intention is to keep enough free space for C2 compiled code
   // to achieve peak performance if the code cache is under stress.
   if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization))  {
-    double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
+    double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
     if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
       k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
     }
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -36,6 +36,7 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/arguments_ext.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
@@ -88,6 +89,8 @@
 bool   Arguments::_has_profile                  = false;
 size_t Arguments::_conservative_max_heap_alignment = 0;
 uintx  Arguments::_min_heap_size                = 0;
+uintx  Arguments::_min_heap_free_ratio          = 0;
+uintx  Arguments::_max_heap_free_ratio          = 0;
 Arguments::Mode Arguments::_mode                = _mixed;
 bool   Arguments::_java_compiler                = false;
 bool   Arguments::_xdebug_mode                  = false;
@@ -1143,7 +1146,27 @@
   }
   // Increase the code cache size - tiered compiles a lot more.
   if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
-    FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 5);
+    FLAG_SET_ERGO(uintx, ReservedCodeCacheSize, ReservedCodeCacheSize * 5);
+  }
+  // Enable SegmentedCodeCache if TieredCompilation is enabled and ReservedCodeCacheSize >= 240M
+  if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M) {
+    FLAG_SET_ERGO(bool, SegmentedCodeCache, true);
+
+    // Multiply sizes by 5 but fix NonMethodCodeHeapSize (distribute among non-profiled and profiled code heap)
+    if (FLAG_IS_DEFAULT(ProfiledCodeHeapSize)) {
+      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, ProfiledCodeHeapSize * 5 + NonMethodCodeHeapSize * 2);
+    }
+    if (FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
+      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize * 5 + NonMethodCodeHeapSize * 2);
+    }
+    // Check consistency of code heap sizes
+    if ((NonMethodCodeHeapSize + NonProfiledCodeHeapSize + ProfiledCodeHeapSize) != ReservedCodeCacheSize) {
+      jio_fprintf(defaultStream::error_stream(),
+                  "Invalid code heap sizes: NonMethodCodeHeapSize(%dK) + ProfiledCodeHeapSize(%dK) + NonProfiledCodeHeapSize(%dK) = %dK. Must be equal to ReservedCodeCacheSize = %uK.\n",
+                  NonMethodCodeHeapSize/K, ProfiledCodeHeapSize/K, NonProfiledCodeHeapSize/K,
+                  (NonMethodCodeHeapSize + ProfiledCodeHeapSize + NonProfiledCodeHeapSize)/K, ReservedCodeCacheSize/K);
+      vm_exit(1);
+    }
   }
   if (!UseInterpreter) { // -Xcomp
     Tier3InvokeNotifyFreqLog = 0;
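When SegmentedCodeCache is enabled ergonomically above, ReservedCodeCacheSize has already been scaled by 5, NonMethodCodeHeapSize stays fixed, and each of the profiled and non-profiled heaps is scaled by 5 plus twice the non-method size; provided the three defaults summed to the old ReservedCodeCacheSize, the new sizes still sum to the new one, so the consistency check that follows passes. A small sketch of that arithmetic with hypothetical sizes (the 48M = 8M + 20M + 20M split is illustrative, not a HotSpot default):

#include <cassert>
#include <cstddef>

int main() {
  const std::size_t M = 1024 * 1024;
  std::size_t reserved     = 48 * M;   // old ReservedCodeCacheSize
  std::size_t non_method   =  8 * M;   // stays fixed
  std::size_t profiled     = 20 * M;
  std::size_t non_profiled = 20 * M;

  reserved     *= 5;                                   // tiered: 5x code cache
  profiled     = profiled     * 5 + non_method * 2;    // absorbs 2x non-method
  non_profiled = non_profiled * 5 + non_method * 2;    // absorbs 2x non-method

  // 5*P + 2*NM + 5*NP + 2*NM + NM == 5*(NM + P + NP) == 5 * old reserved size
  assert(non_method + profiled + non_profiled == reserved);
  return 0;
}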
@@ -1561,24 +1584,25 @@
                                           CollectorPolicy::compute_heap_alignment());
 }
 
-void Arguments::set_ergonomics_flags() {
-
+void Arguments::select_gc_ergonomically() {
   if (os::is_server_class_machine()) {
-    // If no other collector is requested explicitly,
-    // let the VM select the collector based on
-    // machine class and automatic selection policy.
-    if (!UseSerialGC &&
-        !UseConcMarkSweepGC &&
-        !UseG1GC &&
-        !UseParNewGC &&
-        FLAG_IS_DEFAULT(UseParallelGC)) {
-      if (should_auto_select_low_pause_collector()) {
-        FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
-      } else {
-        FLAG_SET_ERGO(bool, UseParallelGC, true);
-      }
+    if (should_auto_select_low_pause_collector()) {
+      FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
+    } else {
+      FLAG_SET_ERGO(bool, UseParallelGC, true);
     }
   }
+}
+
+void Arguments::select_gc() {
+  if (!gc_selected()) {
+    ArgumentsExt::select_gc_ergonomically();
+  }
+}
+
+void Arguments::set_ergonomics_flags() {
+  select_gc();
+
 #ifdef COMPILER2
   // Shared spaces work fine with other GCs but causes bytecode rewriting
   // to be disabled, which hurts interpreter performance and decreases
@@ -1630,9 +1654,11 @@
     // unless the user actually sets these flags.
     if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
       FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
+      _min_heap_free_ratio = MinHeapFreeRatio;
     }
     if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
       FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
+      _max_heap_free_ratio = MaxHeapFreeRatio;
     }
   }
 
@@ -1696,6 +1722,46 @@
   }
 }
 
+#if !INCLUDE_ALL_GCS
+#ifdef ASSERT
+static bool verify_serial_gc_flags() {
+  return (UseSerialGC &&
+        !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
+          UseParallelGC || UseParallelOldGC));
+}
+#endif // ASSERT
+#endif // INCLUDE_ALL_GCS
+
+void Arguments::set_gc_specific_flags() {
+#if INCLUDE_ALL_GCS
+  // Set per-collector flags
+  if (UseParallelGC || UseParallelOldGC) {
+    set_parallel_gc_flags();
+  } else if (UseConcMarkSweepGC) { // Should be done before ParNew check below
+    set_cms_and_parnew_gc_flags();
+  } else if (UseParNewGC) {  // Skipped if CMS is set above
+    set_parnew_gc_flags();
+  } else if (UseG1GC) {
+    set_g1_gc_flags();
+  }
+  check_deprecated_gcs();
+  check_deprecated_gc_flags();
+  if (AssumeMP && !UseSerialGC) {
+    if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
+      warning("If the number of processors is expected to increase from one, then"
+              " you should configure the number of parallel GC threads appropriately"
+              " using -XX:ParallelGCThreads=N");
+    }
+  }
+  if (MinHeapFreeRatio == 100) {
+    // Keeping the heap 100% free is hard ;-) so limit it to 99%.
+    FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
+  }
+#else // INCLUDE_ALL_GCS
+  assert(verify_serial_gc_flags(), "SerialGC unset");
+#endif // INCLUDE_ALL_GCS
+}
+
 julong Arguments::limit_by_allocatable_memory(julong limit) {
   julong max_allocatable;
   julong result = limit;
@@ -1937,16 +2003,6 @@
   return false;
 }
 
-#if !INCLUDE_ALL_GCS
-#ifdef ASSERT
-static bool verify_serial_gc_flags() {
-  return (UseSerialGC &&
-        !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
-          UseParallelGC || UseParallelOldGC));
-}
-#endif // ASSERT
-#endif // INCLUDE_ALL_GCS
-
 // check if do gclog rotation
 // +UseGCLogFileRotation is a must,
 // no gc log rotation when log file not supplied or
@@ -2025,6 +2081,8 @@
                   MaxHeapFreeRatio);
     return false;
   }
+  // This does not set the flag itself, but stores the value in a safe place for later usage.
+  _min_heap_free_ratio = min_heap_free_ratio;
   return true;
 }
 
@@ -2039,11 +2097,13 @@
                   MinHeapFreeRatio);
     return false;
   }
+  // This does not set the flag itself, but stores the value in a safe place for later usage.
+  _max_heap_free_ratio = max_heap_free_ratio;
   return true;
 }
 
 // Check consistency of GC selection
-bool Arguments::check_gc_consistency() {
+bool Arguments::check_gc_consistency_user() {
   check_gclog_consistency();
   bool status = true;
   // Ensure that the user has not selected conflicting sets
@@ -2202,7 +2262,7 @@
     FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
   }
 
-  status = status && check_gc_consistency();
+  status = status && ArgumentsExt::check_gc_consistency_user();
   status = status && check_stack_pages();
 
   if (CMSIncrementalMode) {
@@ -2442,6 +2502,18 @@
                 "Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
                 (2*G)/M);
     status = false;
+  } else if (NonMethodCodeHeapSize < min_code_cache_size){
+    jio_fprintf(defaultStream::error_stream(),
+                "Invalid NonMethodCodeHeapSize=%dK. Must be at least %uK.\n", NonMethodCodeHeapSize/K,
+                min_code_cache_size/K);
+    status = false;
+  } else if ((!FLAG_IS_DEFAULT(NonMethodCodeHeapSize) || !FLAG_IS_DEFAULT(ProfiledCodeHeapSize) || !FLAG_IS_DEFAULT(NonProfiledCodeHeapSize))
+             && (NonMethodCodeHeapSize + NonProfiledCodeHeapSize + ProfiledCodeHeapSize) != ReservedCodeCacheSize) {
+    jio_fprintf(defaultStream::error_stream(),
+                "Invalid code heap sizes: NonMethodCodeHeapSize(%dK) + ProfiledCodeHeapSize(%dK) + NonProfiledCodeHeapSize(%dK) = %dK. Must be equal to ReservedCodeCacheSize = %uK.\n",
+                NonMethodCodeHeapSize/K, ProfiledCodeHeapSize/K, NonProfiledCodeHeapSize/K,
+                (NonMethodCodeHeapSize + ProfiledCodeHeapSize + NonProfiledCodeHeapSize)/K, ReservedCodeCacheSize/K);
+    status = false;
   }
 
   status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
@@ -2459,8 +2531,6 @@
     warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
   }
 
-  status &= check_vm_args_consistency_ext();
-
   return status;
 }
 
@@ -2868,8 +2938,41 @@
         return JNI_EINVAL;
       }
       FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize);
+      // -XX:NonMethodCodeHeapSize=
+    } else if (match_option(option, "-XX:NonMethodCodeHeapSize=", &tail)) {
+      julong long_NonMethodCodeHeapSize = 0;
+
+      ArgsRange errcode = parse_memory_size(tail, &long_NonMethodCodeHeapSize, 1);
+      if (errcode != arg_in_range) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid maximum non-method code heap size: %s.\n", option->optionString);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, NonMethodCodeHeapSize, (uintx)long_NonMethodCodeHeapSize);
+      // -XX:ProfiledCodeHeapSize=
+    } else if (match_option(option, "-XX:ProfiledCodeHeapSize=", &tail)) {
+      julong long_ProfiledCodeHeapSize = 0;
+
+      ArgsRange errcode = parse_memory_size(tail, &long_ProfiledCodeHeapSize, 1);
+      if (errcode != arg_in_range) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid maximum profiled code heap size: %s.\n", option->optionString);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, ProfiledCodeHeapSize, (uintx)long_ProfiledCodeHeapSize);
+      // -XX:NonProfiledCodeHeapSize=
+    } else if (match_option(option, "-XX:NonProfiledCodeHeapSize=", &tail)) {
+      julong long_NonProfiledCodeHeapSize = 0;
+
+      ArgsRange errcode = parse_memory_size(tail, &long_NonProfiledCodeHeapSize, 1);
+      if (errcode != arg_in_range) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid maximum non-profiled code heap size: %s.\n", option->optionString);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, NonProfiledCodeHeapSize, (uintx)long_NonProfiledCodeHeapSize);
       //-XX:IncreaseFirstTierCompileThresholdAt=
-      } else if (match_option(option, "-XX:IncreaseFirstTierCompileThresholdAt=", &tail)) {
+    } else if (match_option(option, "-XX:IncreaseFirstTierCompileThresholdAt=", &tail)) {
         uintx uint_IncreaseFirstTierCompileThresholdAt = 0;
         if (!parse_uintx(tail, &uint_IncreaseFirstTierCompileThresholdAt, 0) || uint_IncreaseFirstTierCompileThresholdAt > 99) {
           jio_fprintf(defaultStream::error_stream(),
@@ -3443,7 +3546,7 @@
     }
   }
 
-  if (!check_vm_args_consistency()) {
+  if (!ArgumentsExt::check_vm_args_consistency()) {
     return JNI_ERR;
   }
 
@@ -3799,7 +3902,7 @@
   set_shared_spaces_flags();
 
   // Check the GC selections again.
-  if (!check_gc_consistency()) {
+  if (!ArgumentsExt::check_gc_consistency_ergo()) {
     return JNI_EINVAL;
   }
 
@@ -3821,33 +3924,7 @@
   // Set heap size based on available physical memory
   set_heap_size();
 
-#if INCLUDE_ALL_GCS
-  // Set per-collector flags
-  if (UseParallelGC || UseParallelOldGC) {
-    set_parallel_gc_flags();
-  } else if (UseConcMarkSweepGC) { // Should be done before ParNew check below
-    set_cms_and_parnew_gc_flags();
-  } else if (UseParNewGC) {  // Skipped if CMS is set above
-    set_parnew_gc_flags();
-  } else if (UseG1GC) {
-    set_g1_gc_flags();
-  }
-  check_deprecated_gcs();
-  check_deprecated_gc_flags();
-  if (AssumeMP && !UseSerialGC) {
-    if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
-      warning("If the number of processors is expected to increase from one, then"
-              " you should configure the number of parallel GC threads appropriately"
-              " using -XX:ParallelGCThreads=N");
-    }
-  }
-  if (MinHeapFreeRatio == 100) {
-    // Keeping the heap 100% free is hard ;-) so limit it to 99%.
-    FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
-  }
-#else // INCLUDE_ALL_GCS
-  assert(verify_serial_gc_flags(), "SerialGC unset");
-#endif // INCLUDE_ALL_GCS
+  set_gc_specific_flags();
 
   // Initialize Metaspace flags and alignments
   Metaspace::ergo_initialize();
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -285,7 +285,11 @@
   // Value of the conservative maximum heap alignment needed
   static size_t  _conservative_max_heap_alignment;
 
-  static uintx  _min_heap_size;
+  static uintx _min_heap_size;
+
+  // Used to store original flag values
+  static uintx _min_heap_free_ratio;
+  static uintx _max_heap_free_ratio;
 
   // -Xrun arguments
   static AgentLibraryList _libraryList;
@@ -336,8 +340,10 @@
   static void set_conservative_max_heap_alignment();
   static void set_use_compressed_oops();
   static void set_use_compressed_klass_ptrs();
+  static void select_gc();
   static void set_ergonomics_flags();
   static void set_shared_spaces_flags();
+  static void set_gc_specific_flags();
   // limits the given memory size by the maximum amount of memory this process is
   // currently allowed to allocate or reserve.
   static julong limit_by_allocatable_memory(julong size);
@@ -449,6 +455,9 @@
   // Adjusts the arguments after the OS have adjusted the arguments
   static jint adjust_after_os();
 
+  static inline bool gc_selected(); // whether a gc has been selected
+  static void select_gc_ergonomically();
+
   // Verifies that the given value will fit as a MinHeapFreeRatio. If not, an error
   // message is returned in the provided buffer.
   static bool verify_MinHeapFreeRatio(FormatBuffer<80>& err_msg, uintx min_heap_free_ratio);
@@ -458,12 +467,12 @@
   static bool verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_heap_free_ratio);
 
   // Check for consistency in the selection of the garbage collector.
-  static bool check_gc_consistency();
+  static bool check_gc_consistency_user();        // Check user-selected gc
+  static inline bool check_gc_consistency_ergo(); // Check ergonomic-selected gc
   static void check_deprecated_gcs();
   static void check_deprecated_gc_flags();
   // Check consistency or otherwise of VM argument settings
   static bool check_vm_args_consistency();
-  static bool check_vm_args_consistency_ext();
   // Check stack pages settings
   static bool check_stack_pages();
   // Used by os_solaris
@@ -516,6 +525,10 @@
   static uintx min_heap_size()              { return _min_heap_size; }
   static void  set_min_heap_size(uintx v)   { _min_heap_size = v;  }
 
+  // Returns the original values of -XX:MinHeapFreeRatio and -XX:MaxHeapFreeRatio
+  static uintx min_heap_free_ratio()        { return _min_heap_free_ratio; }
+  static uintx max_heap_free_ratio()        { return _max_heap_free_ratio; }
+
   // -Xrun
   static AgentLibrary* libraries()          { return _libraryList.first(); }
   static bool init_libraries_at_startup()   { return !_libraryList.is_empty(); }
@@ -598,4 +611,13 @@
   static bool copy_expand_pid(const char* src, size_t srclen, char* buf, size_t buflen);
 };
 
+bool Arguments::gc_selected() {
+  return UseConcMarkSweepGC || UseG1GC || UseParallelGC || UseParallelOldGC ||
+    UseParNewGC || UseSerialGC;
+}
+
+bool Arguments::check_gc_consistency_ergo() {
+  return check_gc_consistency_user();
+}
+
 #endif // SHARE_VM_RUNTIME_ARGUMENTS_HPP
--- a/hotspot/src/share/vm/runtime/arguments_ext.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/arguments.hpp"
-
-bool Arguments::check_vm_args_consistency_ext() {
-  return true;
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/arguments_ext.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
+#define SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/arguments.hpp"
+
+class ArgumentsExt: AllStatic {
+public:
+  static inline void select_gc_ergonomically();
+  static inline bool check_gc_consistency_user();
+  static inline bool check_gc_consistency_ergo();
+  static inline bool check_vm_args_consistency();
+};
+
+void ArgumentsExt::select_gc_ergonomically() {
+  Arguments::select_gc_ergonomically();
+}
+
+bool ArgumentsExt::check_gc_consistency_user() {
+  return Arguments::check_gc_consistency_user();
+}
+
+bool ArgumentsExt::check_gc_consistency_ergo() {
+  return Arguments::check_gc_consistency_ergo();
+}
+
+bool ArgumentsExt::check_vm_args_consistency() {
+  return Arguments::check_vm_args_consistency();
+}
+
+#endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
--- a/hotspot/src/share/vm/runtime/fprofiler.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/fprofiler.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -165,7 +165,7 @@
   for (int index = 0; index < s; index++) {
     counters[index] = 0;
   }
-  base = CodeCache::first_address();
+  base = CodeCache::low_bound();
 }
 
 void PCRecorder::record(address pc) {
--- a/hotspot/src/share/vm/runtime/frame.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/frame.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -68,6 +68,15 @@
   // Constructors
   frame();
 
+#ifndef PRODUCT
+  // This is a generic constructor which is only used by pns() in debug.cpp.
+  // pns (i.e. print native stack) uses this constructor to create a starting
+  // frame for stack walking. The implementation of this constructor is platform
+  // dependent (i.e. SPARC doesn't need an 'fp' argument and will ignore it) but
+  // we want to keep the signature generic because pns() is shared code.
+  frame(void* sp, void* fp, void* pc);
+#endif
+
   // Accessors
 
   // pc: Returns the pc at which this frame will continue normally.
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -186,6 +186,10 @@
 define_pd_global(intx, InlineUnsafeOps,              true);
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
 define_pd_global(intx, ReservedCodeCacheSize,        32*M);
+define_pd_global(intx, NonProfiledCodeHeapSize,      0);
+define_pd_global(intx, ProfiledCodeHeapSize,         0);
+define_pd_global(intx, NonMethodCodeHeapSize,        32*M);
+
 define_pd_global(intx, CodeCacheExpansionSize,       32*K);
 define_pd_global(intx, CodeCacheMinBlockLength,      1);
 define_pd_global(intx, CodeCacheMinimumUseSpace,     200*K);
@@ -3354,9 +3358,21 @@
   develop_pd(uintx, CodeCacheMinimumUseSpace,                               \
           "Minimum code cache size (in bytes) required to start VM.")       \
                                                                             \
+  product(bool, SegmentedCodeCache, false,                                  \
+          "Use a segmented code cache")                                     \
+                                                                            \
   product_pd(uintx, ReservedCodeCacheSize,                                  \
           "Reserved code cache size (in bytes) - maximum code cache size")  \
                                                                             \
+  product_pd(uintx, NonProfiledCodeHeapSize,                                \
+          "Size of code heap with non-profiled methods (in bytes)")         \
+                                                                            \
+  product_pd(uintx, ProfiledCodeHeapSize,                                   \
+          "Size of code heap with profiled methods (in bytes)")             \
+                                                                            \
+  product_pd(uintx, NonMethodCodeHeapSize,                                  \
+          "Size of code heap with non-methods (in bytes)")                  \
+                                                                            \
   product(uintx, CodeCacheMinimumFreeSpace, 500*K,                          \
           "When less than X space left, we stop compiling")                 \
                                                                             \
--- a/hotspot/src/share/vm/runtime/init.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/init.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -49,6 +49,7 @@
 void management_init();
 void bytecodes_init();
 void classLoader_init();
+void compilationPolicy_init();
 void codeCache_init();
 void VM_Version_init();
 void os_init_globals();        // depends on VM_Version_init, before universe_init
@@ -68,7 +69,6 @@
 void vtableStubs_init();
 void InlineCacheBuffer_init();
 void compilerOracle_init();
-void compilationPolicy_init();
 void compileBroker_init();
 
 // Initialization after compiler initialization
@@ -97,6 +97,7 @@
   management_init();
   bytecodes_init();
   classLoader_init();
+  compilationPolicy_init();
   codeCache_init();
   VM_Version_init();
   os_init_globals();
@@ -123,7 +124,6 @@
   vtableStubs_init();
   InlineCacheBuffer_init();
   compilerOracle_init();
-  compilationPolicy_init();
   compileBroker_init();
   VMRegImpl::set_regName();
 
--- a/hotspot/src/share/vm/runtime/serviceThread.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/serviceThread.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -29,6 +29,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/os.hpp"
 #include "prims/jvmtiImpl.hpp"
+#include "services/allocationContextService.hpp"
 #include "services/gcNotifier.hpp"
 #include "services/diagnosticArgument.hpp"
 #include "services/diagnosticFramework.hpp"
@@ -86,6 +87,7 @@
     bool has_jvmti_events = false;
     bool has_gc_notification_event = false;
     bool has_dcmd_notification_event = false;
+    bool acs_notify = false;
     JvmtiDeferredEvent jvmti_event;
     {
       // Need state transition ThreadBlockInVM so that this thread
@@ -102,7 +104,8 @@
       while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) &&
              !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) &&
               !(has_gc_notification_event = GCNotifier::has_event()) &&
-              !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification())) {
+              !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) &&
+             !(acs_notify = AllocationContextService::should_notify())) {
         // wait until one of the sensors has pending requests, or there is a
         // pending JVMTI event or JMX GC notification to post
         Service_lock->wait(Mutex::_no_safepoint_check_flag);
@@ -128,6 +131,10 @@
     if(has_dcmd_notification_event) {
       DCmdFactory::send_notification(CHECK);
     }
+
+    if (acs_notify) {
+      AllocationContextService::notify(CHECK);
+    }
   }
 }
 
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -2422,7 +2422,7 @@
       // Ought to log this but compile log is only per compile thread
       // and we're some non descript Java thread.
       MutexUnlocker mu(AdapterHandlerLibrary_lock);
-      CompileBroker::handle_full_code_cache();
+      CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
       return NULL; // Out of CodeCache space
     }
     entry->relocate(new_adapter->content_begin());
@@ -2596,7 +2596,7 @@
     nm->post_compiled_method_load_event();
   } else {
     // CodeCache is full, disable compilation
-    CompileBroker::handle_full_code_cache();
+    CompileBroker::handle_full_code_cache(CodeBlobType::MethodNonProfiled);
   }
 }
 
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -131,7 +131,7 @@
 #define SWEEP(nm)
 #endif
 
-nmethod* NMethodSweeper::_current                      = NULL; // Current nmethod
+NMethodIterator NMethodSweeper::_current;                      // Current nmethod
 long     NMethodSweeper::_traversals                   = 0;    // Stack scan count, also sweep ID.
 long     NMethodSweeper::_total_nof_code_cache_sweeps  = 0;    // Total number of full sweeps of the code cache
 long     NMethodSweeper::_time_counter                 = 0;    // Virtual time used to periodically invoke sweeper
@@ -150,26 +150,24 @@
                                                                //   3) zombie      -> marked_for_reclamation
 int    NMethodSweeper::_hotness_counter_reset_val       = 0;
 
-long   NMethodSweeper::_total_nof_methods_reclaimed     = 0;    // Accumulated nof methods flushed
-long   NMethodSweeper::_total_nof_c2_methods_reclaimed  = 0;    // Accumulated nof methods flushed
-size_t NMethodSweeper::_total_flushed_size              = 0;    // Total number of bytes flushed from the code cache
-Tickspan  NMethodSweeper::_total_time_sweeping;                 // Accumulated time sweeping
-Tickspan  NMethodSweeper::_total_time_this_sweep;               // Total time this sweep
-Tickspan  NMethodSweeper::_peak_sweep_time;                     // Peak time for a full sweep
-Tickspan  NMethodSweeper::_peak_sweep_fraction_time;            // Peak time sweeping one fraction
-
+long   NMethodSweeper::_total_nof_methods_reclaimed     = 0;   // Accumulated nof methods flushed
+long   NMethodSweeper::_total_nof_c2_methods_reclaimed  = 0;   // Accumulated nof methods flushed
+size_t NMethodSweeper::_total_flushed_size              = 0;   // Total number of bytes flushed from the code cache
+Tickspan NMethodSweeper::_total_time_sweeping;                 // Accumulated time sweeping
+Tickspan NMethodSweeper::_total_time_this_sweep;               // Total time this sweep
+Tickspan NMethodSweeper::_peak_sweep_time;                     // Peak time for a full sweep
+Tickspan NMethodSweeper::_peak_sweep_fraction_time;            // Peak time sweeping one fraction
 
 
 class MarkActivationClosure: public CodeBlobClosure {
 public:
   virtual void do_code_blob(CodeBlob* cb) {
-    if (cb->is_nmethod()) {
-      nmethod* nm = (nmethod*)cb;
-      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
-      // If we see an activation belonging to a non_entrant nmethod, we mark it.
-      if (nm->is_not_entrant()) {
-        nm->mark_as_seen_on_stack();
-      }
+    assert(cb->is_nmethod(), "CodeBlob should be nmethod");
+    nmethod* nm = (nmethod*)cb;
+    nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
+    // If we see an activation belonging to a non_entrant nmethod, we mark it.
+    if (nm->is_not_entrant()) {
+      nm->mark_as_seen_on_stack();
     }
   }
 };
@@ -178,10 +176,9 @@
 class SetHotnessClosure: public CodeBlobClosure {
 public:
   virtual void do_code_blob(CodeBlob* cb) {
-    if (cb->is_nmethod()) {
-      nmethod* nm = (nmethod*)cb;
-      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
-    }
+    assert(cb->is_nmethod(), "CodeBlob should be nmethod");
+    nmethod* nm = (nmethod*)cb;
+    nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
   }
 };
 static SetHotnessClosure set_hotness_closure;
@@ -194,7 +191,7 @@
   return _hotness_counter_reset_val;
 }
 bool NMethodSweeper::sweep_in_progress() {
-  return (_current != NULL);
+  return !_current.end();
 }
 
 // Scans the stacks of all Java threads and marks activations of not-entrant methods.
@@ -212,11 +209,13 @@
   _time_counter++;
 
   // Check for restart
-  assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
+  assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
   if (!sweep_in_progress()) {
     _seen = 0;
     _sweep_fractions_left = NmethodSweepFraction;
-    _current = CodeCache::first_nmethod();
+    _current = NMethodIterator();
+    // Initialize to first nmethod
+    _current.next();
     _traversals += 1;
     _total_time_this_sweep = Tickspan();
 
@@ -271,7 +270,9 @@
     // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
     // value) that disables the intended periodic sweeps.
     const int max_wait_time = ReservedCodeCacheSize / (16 * M);
-    double wait_until_next_sweep = max_wait_time - time_since_last_sweep - CodeCache::reverse_free_ratio();
+    double wait_until_next_sweep = max_wait_time - time_since_last_sweep -
+        MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled),
+             CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled));
     assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
 
     if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
@@ -353,7 +354,7 @@
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
     // The last invocation iterates until there are no more nmethods
-    for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
+    while ((swept_count < todo || _sweep_fractions_left == 1) && !_current.end()) {
       swept_count++;
       if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
         if (PrintMethodFlushing && Verbose) {
@@ -369,19 +370,19 @@
       // Since we will give up the CodeCache_lock, always skip ahead
       // to the next nmethod.  Other blobs can be deleted by other
       // threads but nmethods are only reclaimed by the sweeper.
-      nmethod* next = CodeCache::next_nmethod(_current);
+      nmethod* nm = _current.method();
+      _current.next();
 
       // Now ready to process nmethod and give up CodeCache_lock
       {
         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-        freed_memory += process_nmethod(_current);
+        freed_memory += process_nmethod(nm);
       }
       _seen++;
-      _current = next;
     }
   }
 
-  assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");
+  assert(_sweep_fractions_left > 1 || _current.end(), "must have scanned the whole cache");
 
   const Ticks sweep_end_counter = Ticks::now();
   const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
@@ -594,7 +595,8 @@
       // ReservedCodeCacheSize
       int reset_val = hotness_counter_reset_val();
       int time_since_reset = reset_val - nm->hotness_counter();
-      double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
+      int code_blob_type = (CodeCache::get_code_blob_type(nm->comp_level()));
+      double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
       // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
       // I.e., 'threshold' increases with lower available space in the code cache and a higher
       // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
@@ -614,12 +616,7 @@
       // The stack-scanning low-cost detection may not see the method was used (which can happen for
       // flat profiles). Check the age counter for possible data.
       if (UseCodeAging && make_not_entrant && (nm->is_compiled_by_c2() || nm->is_compiled_by_c1())) {
-        MethodCounters* mc = nm->method()->method_counters();
-        if (mc == NULL) {
-          // Sometimes we can get here without MethodCounters. For example if we run with -Xcomp.
-          // Try to allocate them.
-          mc = nm->method()->get_method_counters(Thread::current());
-        }
+        MethodCounters* mc = nm->method()->get_method_counters(Thread::current());
         if (mc != NULL) {
           // Snapshot the value as it's changed concurrently
           int age = mc->nmethod_age();
--- a/hotspot/src/share/vm/runtime/sweeper.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -54,33 +54,33 @@
 //     is full.
 
 class NMethodSweeper : public AllStatic {
-  static long      _traversals;                     // Stack scan count, also sweep ID.
-  static long      _total_nof_code_cache_sweeps;    // Total number of full sweeps of the code cache
-  static long      _time_counter;                   // Virtual time used to periodically invoke sweeper
-  static long      _last_sweep;                     // Value of _time_counter when the last sweep happened
-  static nmethod*  _current;                        // Current nmethod
-  static int       _seen;                           // Nof. nmethod we have currently processed in current pass of CodeCache
-  static int       _flushed_count;                  // Nof. nmethods flushed in current sweep
-  static int       _zombified_count;                // Nof. nmethods made zombie in current sweep
-  static int       _marked_for_reclamation_count;   // Nof. nmethods marked for reclaim in current sweep
+  static long      _traversals;                   // Stack scan count, also sweep ID.
+  static long      _total_nof_code_cache_sweeps;  // Total number of full sweeps of the code cache
+  static long      _time_counter;                 // Virtual time used to periodically invoke sweeper
+  static long      _last_sweep;                   // Value of _time_counter when the last sweep happened
+  static NMethodIterator _current;                // Current nmethod
+  static int       _seen;                         // Nof. nmethods we have currently processed in current pass of CodeCache
+  static int       _flushed_count;                // Nof. nmethods flushed in current sweep
+  static int       _zombified_count;              // Nof. nmethods made zombie in current sweep
+  static int       _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep
 
-  static volatile int  _sweep_fractions_left;       // Nof. invocations left until we are completed with this pass
-  static volatile int  _sweep_started;              // Flag to control conc sweeper
-  static volatile bool _should_sweep;               // Indicates if we should invoke the sweeper
-  static volatile int  _bytes_changed;              // Counts the total nmethod size if the nmethod changed from:
-                                                    //   1) alive       -> not_entrant
-                                                    //   2) not_entrant -> zombie
-                                                    //   3) zombie      -> marked_for_reclamation
+  static volatile int  _sweep_fractions_left;     // Nof. invocations left until we are completed with this pass
+  static volatile int  _sweep_started;            // Flag to control conc sweeper
+  static volatile bool _should_sweep;             // Indicates if we should invoke the sweeper
+  static volatile int _bytes_changed;             // Counts the total nmethod size if the nmethod changed from:
+                                                  //   1) alive       -> not_entrant
+                                                  //   2) not_entrant -> zombie
+                                                  //   3) zombie      -> marked_for_reclamation
   // Stat counters
   static long      _total_nof_methods_reclaimed;    // Accumulated nof methods flushed
   static long      _total_nof_c2_methods_reclaimed; // Accumulated nof C2-compiled methods flushed
   static size_t    _total_flushed_size;             // Total size of flushed methods
   static int       _hotness_counter_reset_val;
 
-  static Tickspan  _total_time_sweeping;            // Accumulated time sweeping
-  static Tickspan  _total_time_this_sweep;          // Total time this sweep
-  static Tickspan  _peak_sweep_time;                // Peak time for a full sweep
-  static Tickspan  _peak_sweep_fraction_time;       // Peak time sweeping one fraction
+  static Tickspan  _total_time_sweeping;          // Accumulated time sweeping
+  static Tickspan  _total_time_this_sweep;        // Total time this sweep
+  static Tickspan  _peak_sweep_time;              // Peak time for a full sweep
+  static Tickspan  _peak_sweep_fraction_time;     // Peak time sweeping one fraction
 
   static int  process_nmethod(nmethod *nm);
   static void release_nmethod(nmethod* nm);
@@ -98,7 +98,7 @@
 
 
 #ifdef ASSERT
-  static bool is_sweeping(nmethod* which) { return _current == which; }
+  static bool is_sweeping(nmethod* which) { return _current.method() == which; }
   // Keep track of sweeper activity in the ring buffer
   static void record_sweep(nmethod* nm, int line);
   static void report_events(int id, address entry);
--- a/hotspot/src/share/vm/runtime/thread.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -826,6 +826,7 @@
       st->print("os_prio=%d ", os_prio);
     }
     st->print("tid=" INTPTR_FORMAT " ", this);
+    ext().print_on(st);
     osthread()->print_on(st);
   }
   debug_only(if (WizardMode) print_owned_locks_on(st);)
@@ -2964,6 +2965,8 @@
   // Push the Java priority down to the native thread; needs Threads_lock
   Thread::set_priority(this, prio);
 
+  prepare_ext();
+
   // Add the new thread to the Threads list and set it in motion.
   // We must have threads lock in order to call Threads::add.
   // It is crucial that we do not block before the thread is
@@ -3795,6 +3798,24 @@
   }
 }
 
+JavaThread* Threads::find_java_thread_from_java_tid(jlong java_tid) {
+  assert(Threads_lock->owned_by_self(), "Must hold Threads_lock");
+
+  JavaThread* java_thread = NULL;
+  // Sequential search for now.  Need to do better optimization later.
+  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
+    oop tobj = thread->threadObj();
+    if (!thread->is_exiting() &&
+        tobj != NULL &&
+        java_tid == java_lang_Thread::thread_id(tobj)) {
+      java_thread = thread;
+      break;
+    }
+  }
+  return java_thread;
+}
+
+
 // Last thread running calls java.lang.Shutdown.shutdown()
 void JavaThread::invoke_shutdown_hooks() {
   HandleMark hm(this);
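
The new Threads::find_java_thread_from_java_tid() above is the helper hoisted out of management.cpp further down in this changeset; it matches the id against java_lang_Thread::thread_id(), i.e. the value java.lang.Thread.getId() returns and the id that java.lang.management callers pass in, and it must be called with the Threads_lock held. A minimal Java sketch of that caller-side view follows, for orientation only; it assumes a standard HotSpot JDK, and the class name is hypothetical (nothing in it is introduced by this changeset).

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

// Hypothetical example class (not part of this changeset).
public class TidLookupExample {
    public static void main(String[] args) {
        // The "java tid" is java.lang.Thread.getId(); on the VM side it is matched
        // against java_lang_Thread::thread_id() while the Threads_lock is held.
        long tid = Thread.currentThread().getId();
        ThreadMXBean tmx = ManagementFactory.getThreadMXBean();
        ThreadInfo info = tmx.getThreadInfo(tid);   // null if the thread no longer exists
        System.out.println(tid + " -> " + (info == null ? "gone" : info.getThreadName()));
    }
}
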
--- a/hotspot/src/share/vm/runtime/thread.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -40,6 +40,7 @@
 #include "runtime/safepoint.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/threadLocalStorage.hpp"
+#include "runtime/thread_ext.hpp"
 #include "runtime/unhandledOops.hpp"
 #include "utilities/macros.hpp"
 
@@ -256,6 +257,8 @@
 
   TRACE_DATA _trace_data;                       // Thread-local data for tracing
 
+  ThreadExt _ext;
+
   int   _vm_operation_started_count;            // VM_Operation support
   int   _vm_operation_completed_count;          // VM_Operation support
 
@@ -409,6 +412,9 @@
 
   TRACE_DATA* trace_data()              { return &_trace_data; }
 
+  const ThreadExt& ext() const          { return _ext; }
+  ThreadExt& ext()                      { return _ext; }
+
   // VM operation support
   int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
   int vm_operation_completed_count()             { return _vm_operation_completed_count; }
@@ -978,6 +984,7 @@
   // not specified, use the priority of the thread object. Threads_lock
   // must be held while this function is called.
   void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);
+  void prepare_ext();
 
   void set_saved_exception_pc(address pc)        { _saved_exception_pc = pc; }
   address saved_exception_pc()                   { return _saved_exception_pc; }
@@ -1910,6 +1917,8 @@
   // Deoptimizes all frames tied to marked nmethods
   static void deoptimized_wrt_marked_nmethods();
 
+  static JavaThread* find_java_thread_from_java_tid(jlong java_tid);
+
 };
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/thread_ext.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/thread_ext.hpp"
+
+void JavaThread::prepare_ext() {
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/thread_ext.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_THREAD_EXT_HPP
+#define SHARE_VM_RUNTIME_THREAD_EXT_HPP
+
+#include "memory/allocation.hpp"
+
+class ThreadExt VALUE_OBJ_CLASS_SPEC {
+public:
+  void print_on(outputStream* st) const {};
+};
+
+#endif // SHARE_VM_RUNTIME_THREAD_EXT_HPP
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -765,8 +765,8 @@
   /* CodeCache (NOTE: incomplete) */                                                                                                 \
   /********************************/                                                                                                 \
                                                                                                                                      \
-     static_field(CodeCache,                   _heap,                                         CodeHeap*)                             \
-     static_field(CodeCache,                   _scavenge_root_nmethods,                       nmethod*)                              \
+  static_field(CodeCache,                      _heaps,                                        GrowableArray<CodeHeap*>*)             \
+  static_field(CodeCache,                      _scavenge_root_nmethods,                       nmethod*)                              \
                                                                                                                                      \
   /*******************************/                                                                                                  \
   /* CodeHeap (NOTE: incomplete) */                                                                                                  \
--- a/hotspot/src/share/vm/runtime/vm_operations.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -68,6 +68,7 @@
   template(G1CollectFull)                         \
   template(G1CollectForAllocation)                \
   template(G1IncCollectionPause)                  \
+  template(DestroyAllocationContext)              \
   template(EnableBiasedLocking)                   \
   template(RevokeBias)                            \
   template(BulkRevokeBias)                        \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/allocationContextService.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP
+#define SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP
+
+#include "utilities/exceptions.hpp"
+
+class AllocationContextService: public AllStatic {
+public:
+  static inline bool should_notify();
+  static inline void notify(TRAPS);
+};
+
+bool AllocationContextService::should_notify() { return false; }
+void AllocationContextService::notify(TRAPS) { }
+
+#endif // SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP
--- a/hotspot/src/share/vm/services/diagnosticCommand.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/services/diagnosticCommand.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -427,7 +427,7 @@
     return "Compiler.codelist";
   }
   static const char* description() {
-    return "Print all compiled methods in code cache.";
+    return "Print all compiled methods in code cache that are alive";
   }
   static const char* impact() {
     return "Medium";
--- a/hotspot/src/share/vm/services/heapDumper.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/services/heapDumper.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -722,7 +722,7 @@
 
       // reflection and sun.misc.Unsafe classes may have a reference to a
       // Klass* so filter it out.
-      assert(o->is_oop_or_null(), "should always be an oop");
+      assert(o->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(o)));
       writer->write_objectID(o);
       break;
     }
--- a/hotspot/src/share/vm/services/management.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/services/management.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -392,23 +392,6 @@
   return (instanceOop) element();
 }
 
-// Helper functions
-static JavaThread* find_java_thread_from_id(jlong thread_id) {
-  assert(Threads_lock->owned_by_self(), "Must hold Threads_lock");
-
-  JavaThread* java_thread = NULL;
-  // Sequential search for now.  Need to do better optimization later.
-  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
-    oop tobj = thread->threadObj();
-    if (!thread->is_exiting() &&
-        tobj != NULL &&
-        thread_id == java_lang_Thread::thread_id(tobj)) {
-      java_thread = thread;
-      break;
-    }
-  }
-  return java_thread;
-}
 
 static GCMemoryManager* get_gc_memory_manager_from_jobject(jobject mgr, TRAPS) {
   if (mgr == NULL) {
@@ -445,6 +428,8 @@
   return MemoryService::get_memory_pool(ph);
 }
 
+#endif // INCLUDE_MANAGEMENT
+
 static void validate_thread_id_array(typeArrayHandle ids_ah, TRAPS) {
   int num_threads = ids_ah->length();
 
@@ -460,6 +445,8 @@
   }
 }
 
+#if INCLUDE_MANAGEMENT
+
 static void validate_thread_info_array(objArrayHandle infoArray_h, TRAPS) {
   // check if the element of infoArray is of type ThreadInfo class
   Klass* threadinfo_klass = Management::java_lang_management_ThreadInfo_klass(CHECK);
@@ -823,45 +810,6 @@
   return prev;
 JVM_END
 
-// Gets an array containing the amount of memory allocated on the Java
-// heap for a set of threads (in bytes).  Each element of the array is
-// the amount of memory allocated for the thread ID specified in the
-// corresponding entry in the given array of thread IDs; or -1 if the
-// thread does not exist or has terminated.
-JVM_ENTRY(void, jmm_GetThreadAllocatedMemory(JNIEnv *env, jlongArray ids,
-                                             jlongArray sizeArray))
-  // Check if threads is null
-  if (ids == NULL || sizeArray == NULL) {
-    THROW(vmSymbols::java_lang_NullPointerException());
-  }
-
-  ResourceMark rm(THREAD);
-  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
-  typeArrayHandle ids_ah(THREAD, ta);
-
-  typeArrayOop sa = typeArrayOop(JNIHandles::resolve_non_null(sizeArray));
-  typeArrayHandle sizeArray_h(THREAD, sa);
-
-  // validate the thread id array
-  validate_thread_id_array(ids_ah, CHECK);
-
-  // sizeArray must be of the same length as the given array of thread IDs
-  int num_threads = ids_ah->length();
-  if (num_threads != sizeArray_h->length()) {
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-              "The length of the given long array does not match the length of "
-              "the given array of thread IDs");
-  }
-
-  MutexLockerEx ml(Threads_lock);
-  for (int i = 0; i < num_threads; i++) {
-    JavaThread* java_thread = find_java_thread_from_id(ids_ah->long_at(i));
-    if (java_thread != NULL) {
-      sizeArray_h->long_at_put(i, java_thread->cooked_allocated_bytes());
-    }
-  }
-JVM_END
-
 // Returns a java/lang/management/MemoryUsage object representing
 // the memory usage for the heap or non-heap memory.
 JVM_ENTRY(jobject, jmm_GetMemoryUsage(JNIEnv* env, jboolean heap))
@@ -1167,7 +1115,7 @@
     MutexLockerEx ml(Threads_lock);
     for (int i = 0; i < num_threads; i++) {
       jlong tid = ids_ah->long_at(i);
-      JavaThread* jt = find_java_thread_from_id(tid);
+      JavaThread* jt = Threads::find_java_thread_from_java_tid(tid);
       oop thread_obj = (jt != NULL ? jt->threadObj() : (oop)NULL);
       instanceHandle threadObj_h(THREAD, (instanceOop) thread_obj);
       thread_handle_array->append(threadObj_h);
@@ -1244,7 +1192,7 @@
       MutexLockerEx ml(Threads_lock);
       for (int i = 0; i < num_threads; i++) {
         jlong tid = ids_ah->long_at(i);
-        JavaThread* jt = find_java_thread_from_id(tid);
+        JavaThread* jt = Threads::find_java_thread_from_java_tid(tid);
         ThreadSnapshot* ts;
         if (jt == NULL) {
           // if the thread does not exist or now it is terminated,
@@ -1488,7 +1436,7 @@
         }
       } else {
         // reset contention statistics for a given thread
-        JavaThread* java_thread = find_java_thread_from_id(tid);
+        JavaThread* java_thread = Threads::find_java_thread_from_java_tid(tid);
         if (java_thread == NULL) {
           return false;
         }
@@ -1557,7 +1505,7 @@
     return os::current_thread_cpu_time();
   } else {
     MutexLockerEx ml(Threads_lock);
-    java_thread = find_java_thread_from_id(thread_id);
+    java_thread = Threads::find_java_thread_from_java_tid(thread_id);
     if (java_thread != NULL) {
       return os::thread_cpu_time((Thread*) java_thread);
     }
@@ -1565,78 +1513,6 @@
   return -1;
 JVM_END
 
-// Returns the CPU time consumed by a given thread (in nanoseconds).
-// If thread_id == 0, CPU time for the current thread is returned.
-// If user_sys_cpu_time = true, user level and system CPU time of
-// a given thread is returned; otherwise, only user level CPU time
-// is returned.
-JVM_ENTRY(jlong, jmm_GetThreadCpuTimeWithKind(JNIEnv *env, jlong thread_id, jboolean user_sys_cpu_time))
-  if (!os::is_thread_cpu_time_supported()) {
-    return -1;
-  }
-
-  if (thread_id < 0) {
-    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
-               "Invalid thread ID", -1);
-  }
-
-  JavaThread* java_thread = NULL;
-  if (thread_id == 0) {
-    // current thread
-    return os::current_thread_cpu_time(user_sys_cpu_time != 0);
-  } else {
-    MutexLockerEx ml(Threads_lock);
-    java_thread = find_java_thread_from_id(thread_id);
-    if (java_thread != NULL) {
-      return os::thread_cpu_time((Thread*) java_thread, user_sys_cpu_time != 0);
-    }
-  }
-  return -1;
-JVM_END
-
-// Gets an array containing the CPU times consumed by a set of threads
-// (in nanoseconds).  Each element of the array is the CPU time for the
-// thread ID specified in the corresponding entry in the given array
-// of thread IDs; or -1 if the thread does not exist or has terminated.
-// If user_sys_cpu_time = true, the sum of user level and system CPU time
-// for the given thread is returned; otherwise, only user level CPU time
-// is returned.
-JVM_ENTRY(void, jmm_GetThreadCpuTimesWithKind(JNIEnv *env, jlongArray ids,
-                                              jlongArray timeArray,
-                                              jboolean user_sys_cpu_time))
-  // Check if threads is null
-  if (ids == NULL || timeArray == NULL) {
-    THROW(vmSymbols::java_lang_NullPointerException());
-  }
-
-  ResourceMark rm(THREAD);
-  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
-  typeArrayHandle ids_ah(THREAD, ta);
-
-  typeArrayOop tia = typeArrayOop(JNIHandles::resolve_non_null(timeArray));
-  typeArrayHandle timeArray_h(THREAD, tia);
-
-  // validate the thread id array
-  validate_thread_id_array(ids_ah, CHECK);
-
-  // timeArray must be of the same length as the given array of thread IDs
-  int num_threads = ids_ah->length();
-  if (num_threads != timeArray_h->length()) {
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-              "The length of the given long array does not match the length of "
-              "the given array of thread IDs");
-  }
-
-  MutexLockerEx ml(Threads_lock);
-  for (int i = 0; i < num_threads; i++) {
-    JavaThread* java_thread = find_java_thread_from_id(ids_ah->long_at(i));
-    if (java_thread != NULL) {
-      timeArray_h->long_at_put(i, os::thread_cpu_time((Thread*)java_thread,
-                                                      user_sys_cpu_time != 0));
-    }
-  }
-JVM_END
-
 // Returns a String array of all VM global flag names
 JVM_ENTRY(jobjectArray, jmm_GetVMGlobalNames(JNIEnv *env))
   // last flag entry is always NULL, so subtract 1
@@ -2331,7 +2207,122 @@
   return (jlong)(((double)ticks / (double)os::elapsed_frequency())
                  * (double)1000.0);
 }
+#endif // INCLUDE_MANAGEMENT
 
+// Gets an array containing the amount of memory allocated on the Java
+// heap for a set of threads (in bytes).  Each element of the array is
+// the amount of memory allocated for the thread ID specified in the
+// corresponding entry in the given array of thread IDs; or -1 if the
+// thread does not exist or has terminated.
+JVM_ENTRY(void, jmm_GetThreadAllocatedMemory(JNIEnv *env, jlongArray ids,
+                                             jlongArray sizeArray))
+  // Check if threads is null
+  if (ids == NULL || sizeArray == NULL) {
+    THROW(vmSymbols::java_lang_NullPointerException());
+  }
+
+  ResourceMark rm(THREAD);
+  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
+  typeArrayHandle ids_ah(THREAD, ta);
+
+  typeArrayOop sa = typeArrayOop(JNIHandles::resolve_non_null(sizeArray));
+  typeArrayHandle sizeArray_h(THREAD, sa);
+
+  // validate the thread id array
+  validate_thread_id_array(ids_ah, CHECK);
+
+  // sizeArray must be of the same length as the given array of thread IDs
+  int num_threads = ids_ah->length();
+  if (num_threads != sizeArray_h->length()) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "The length of the given long array does not match the length of "
+              "the given array of thread IDs");
+  }
+
+  MutexLockerEx ml(Threads_lock);
+  for (int i = 0; i < num_threads; i++) {
+    JavaThread* java_thread = Threads::find_java_thread_from_java_tid(ids_ah->long_at(i));
+    if (java_thread != NULL) {
+      sizeArray_h->long_at_put(i, java_thread->cooked_allocated_bytes());
+    }
+  }
+JVM_END
+
+// Returns the CPU time consumed by a given thread (in nanoseconds).
+// If thread_id == 0, CPU time for the current thread is returned.
+// If user_sys_cpu_time = true, user level and system CPU time of
+// a given thread is returned; otherwise, only user level CPU time
+// is returned.
+JVM_ENTRY(jlong, jmm_GetThreadCpuTimeWithKind(JNIEnv *env, jlong thread_id, jboolean user_sys_cpu_time))
+  if (!os::is_thread_cpu_time_supported()) {
+    return -1;
+  }
+
+  if (thread_id < 0) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+               "Invalid thread ID", -1);
+  }
+
+  JavaThread* java_thread = NULL;
+  if (thread_id == 0) {
+    // current thread
+    return os::current_thread_cpu_time(user_sys_cpu_time != 0);
+  } else {
+    MutexLockerEx ml(Threads_lock);
+    java_thread = Threads::find_java_thread_from_java_tid(thread_id);
+    if (java_thread != NULL) {
+      return os::thread_cpu_time((Thread*) java_thread, user_sys_cpu_time != 0);
+    }
+  }
+  return -1;
+JVM_END
+
+// Gets an array containing the CPU times consumed by a set of threads
+// (in nanoseconds).  Each element of the array is the CPU time for the
+// thread ID specified in the corresponding entry in the given array
+// of thread IDs; or -1 if the thread does not exist or has terminated.
+// If user_sys_cpu_time = true, the sum of user level and system CPU time
+// for the given thread is returned; otherwise, only user level CPU time
+// is returned.
+JVM_ENTRY(void, jmm_GetThreadCpuTimesWithKind(JNIEnv *env, jlongArray ids,
+                                              jlongArray timeArray,
+                                              jboolean user_sys_cpu_time))
+  // Check if threads is null
+  if (ids == NULL || timeArray == NULL) {
+    THROW(vmSymbols::java_lang_NullPointerException());
+  }
+
+  ResourceMark rm(THREAD);
+  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
+  typeArrayHandle ids_ah(THREAD, ta);
+
+  typeArrayOop tia = typeArrayOop(JNIHandles::resolve_non_null(timeArray));
+  typeArrayHandle timeArray_h(THREAD, tia);
+
+  // validate the thread id array
+  validate_thread_id_array(ids_ah, CHECK);
+
+  // timeArray must be of the same length as the given array of thread IDs
+  int num_threads = ids_ah->length();
+  if (num_threads != timeArray_h->length()) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "The length of the given long array does not match the length of "
+              "the given array of thread IDs");
+  }
+
+  MutexLockerEx ml(Threads_lock);
+  for (int i = 0; i < num_threads; i++) {
+    JavaThread* java_thread = Threads::find_java_thread_from_java_tid(ids_ah->long_at(i));
+    if (java_thread != NULL) {
+      timeArray_h->long_at_put(i, os::thread_cpu_time((Thread*)java_thread,
+                                                      user_sys_cpu_time != 0));
+    }
+  }
+JVM_END
+
+
+
+#if INCLUDE_MANAGEMENT
 const struct jmmInterface_1_ jmm_interface = {
   NULL,
   NULL,
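
The relocated jmm_GetThreadAllocatedMemory, jmm_GetThreadCpuTimeWithKind and jmm_GetThreadCpuTimesWithKind entry points above now resolve threads through Threads::find_java_thread_from_java_tid(). For orientation only, here is a minimal sketch of the Java-level calls that end up in these entry points, assuming a HotSpot JDK that exposes the com.sun.management.ThreadMXBean extension; the class name is hypothetical and nothing in the sketch is introduced by this changeset.

import java.lang.management.ManagementFactory;

// Hypothetical example class (not part of this changeset).
public class ThreadAllocAndCpuExample {
    public static void main(String[] args) {
        // HotSpot's ThreadMXBean implements the com.sun.management extension, which
        // is backed by jmm_GetThreadAllocatedMemory / jmm_GetThreadCpuTimesWithKind.
        com.sun.management.ThreadMXBean tmx =
            (com.sun.management.ThreadMXBean) ManagementFactory.getThreadMXBean();

        long[] ids = { Thread.currentThread().getId() };

        if (tmx.isThreadAllocatedMemorySupported()) {
            long[] allocated = tmx.getThreadAllocatedBytes(ids);  // bytes; -1 for dead/unknown threads
            System.out.println("allocated bytes: " + allocated[0]);
        }
        if (tmx.isThreadCpuTimeSupported()) {
            long[] cpu = tmx.getThreadCpuTime(ids);               // nanoseconds; -1 if unavailable
            System.out.println("cpu time (ns): " + cpu[0]);
        }
    }
}
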
--- a/hotspot/src/share/vm/services/memoryService.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/services/memoryService.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -63,7 +63,9 @@
 
 GCMemoryManager* MemoryService::_minor_gc_manager      = NULL;
 GCMemoryManager* MemoryService::_major_gc_manager      = NULL;
-MemoryPool*      MemoryService::_code_heap_pool        = NULL;
+MemoryManager*   MemoryService::_code_cache_manager    = NULL;
+GrowableArray<MemoryPool*>* MemoryService::_code_heap_pools =
+    new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryPool*>(init_code_heap_pools_size, true);
 MemoryPool*      MemoryService::_metaspace_pool        = NULL;
 MemoryPool*      MemoryService::_compressed_class_pool = NULL;
 
@@ -388,15 +390,21 @@
 }
 #endif // INCLUDE_ALL_GCS
 
-void MemoryService::add_code_heap_memory_pool(CodeHeap* heap) {
-  _code_heap_pool = new CodeHeapPool(heap,
-                                     "Code Cache",
-                                     true /* support_usage_threshold */);
-  MemoryManager* mgr = MemoryManager::get_code_cache_memory_manager();
-  mgr->add_pool(_code_heap_pool);
+void MemoryService::add_code_heap_memory_pool(CodeHeap* heap, const char* name) {
+  // Create new memory pool for this heap
+  MemoryPool* code_heap_pool = new CodeHeapPool(heap, name, true /* support_usage_threshold */);
+
+  // Append to lists
+  _code_heap_pools->append(code_heap_pool);
+  _pools_list->append(code_heap_pool);
 
-  _pools_list->append(_code_heap_pool);
-  _managers_list->append(mgr);
+  if (_code_cache_manager == NULL) {
+    // Create CodeCache memory manager
+    _code_cache_manager = MemoryManager::get_code_cache_memory_manager();
+    _managers_list->append(_code_cache_manager);
+  }
+
+  _code_cache_manager->add_pool(code_heap_pool);
 }
 
 void MemoryService::add_metaspace_memory_pools() {
--- a/hotspot/src/share/vm/services/memoryService.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/services/memoryService.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -53,7 +53,8 @@
 private:
   enum {
     init_pools_list_size = 10,
-    init_managers_list_size = 5
+    init_managers_list_size = 5,
+    init_code_heap_pools_size = 9
   };
 
   // index for minor and major generations
@@ -70,8 +71,9 @@
   static GCMemoryManager*               _major_gc_manager;
   static GCMemoryManager*               _minor_gc_manager;
 
-  // Code heap memory pool
-  static MemoryPool*                    _code_heap_pool;
+  // memory manager and code heap pools for the CodeCache
+  static MemoryManager*                 _code_cache_manager;
+  static GrowableArray<MemoryPool*>*    _code_heap_pools;
 
   static MemoryPool*                    _metaspace_pool;
   static MemoryPool*                    _compressed_class_pool;
@@ -123,7 +125,7 @@
 
 public:
   static void set_universe_heap(CollectedHeap* heap);
-  static void add_code_heap_memory_pool(CodeHeap* heap);
+  static void add_code_heap_memory_pool(CodeHeap* heap, const char* name);
   static void add_metaspace_memory_pools();
 
   static MemoryPool*    get_memory_pool(instanceHandle pool);
@@ -146,7 +148,10 @@
 
   static void track_memory_usage();
   static void track_code_cache_memory_usage() {
-    track_memory_pool_usage(_code_heap_pool);
+    // Track memory pool usage of all CodeCache memory pools
+    for (int i = 0; i < _code_heap_pools->length(); ++i) {
+      track_memory_pool_usage(_code_heap_pools->at(i));
+    }
   }
   static void track_metaspace_memory_usage() {
     track_memory_pool_usage(_metaspace_pool);
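
With add_code_heap_memory_pool() now taking a name and appending to _code_heap_pools, each code heap is published as its own memory pool under a single code cache memory manager (the removed code registered one pool named "Code Cache"). A minimal Java sketch of observing those pools through the standard MXBeans follows; the segmented-cache pool names are the ones used by CheckSegmentedCodeCache.java further down, the class name is hypothetical, and the sketch is illustrative rather than part of the changeset.

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryManagerMXBean;
import java.lang.management.MemoryPoolMXBean;
import java.util.Arrays;

// Hypothetical example class (not part of this changeset).
public class CodeHeapPoolsExample {
    public static void main(String[] args) {
        // Segmented code cache: "CodeHeap 'non-methods'", "CodeHeap 'profiled nmethods'",
        // "CodeHeap 'non-profiled nmethods'"; the pre-change single pool was "Code Cache".
        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
            String name = pool.getName();
            if (name.startsWith("CodeHeap") || name.equals("Code Cache")) {
                System.out.println(name + ": " + pool.getUsage()
                    + " (usage threshold supported: " + pool.isUsageThresholdSupported() + ")");
            }
        }
        // All code heap pools are attached to one code cache memory manager.
        for (MemoryManagerMXBean mgr : ManagementFactory.getMemoryManagerMXBeans()) {
            System.out.println(mgr.getName() + " -> " + Arrays.toString(mgr.getMemoryPoolNames()));
        }
    }
}
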
--- a/hotspot/src/share/vm/trace/trace.xml	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/trace/trace.xml	Fri Sep 26 01:40:31 2014 -0700
@@ -394,6 +394,7 @@
 
     <event id="CodeCacheFull" path="vm/code_cache/full" label="Code Cache Full"
          has_thread="true" is_requestable="false" is_constant="false" is_instant="true">
+      <value type="CODEBLOBTYPE" field="codeBlobType" label="Code Heap"/>
       <value type="ADDRESS" field="startAddress" label="Start Address"/>
       <value type="ADDRESS" field="commitedTopAddress" label="Commited Top"/>
       <value type="ADDRESS" field="reservedTopAddress" label="Reserved Top"/>
--- a/hotspot/src/share/vm/trace/tracetypes.xml	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/trace/tracetypes.xml	Fri Sep 26 01:40:31 2014 -0700
@@ -170,6 +170,11 @@
                   type="U1" jvm_type="FLAGVALUEORIGIN">
       <value type="UTF8" field="origin" label="origin" />
     </content_type>
+
+    <content_type id="CodeBlobType" hr_name="Code Blob Type"
+                  type="U1" jvm_type="CODEBLOBTYPE">
+      <value type="UTF8" field="type" label="type" />
+    </content_type>
 
   </content_types>
 
@@ -371,6 +376,10 @@
     <!-- FLAGVALUEORIGIN -->
     <primary_type symbol="FLAGVALUEORIGIN" datatype="U1"
                   contenttype="FLAGVALUEORIGIN" type="u1" sizeop="sizeof(u1)" />
+
+    <!-- CODEBLOBTYPE -->
+    <primary_type symbol="CODEBLOBTYPE" datatype="U1"
+                  contenttype="CODEBLOBTYPE" type="u1" sizeop="sizeof(u1)" />
 
   </primary_types>
 </types>
--- a/hotspot/src/share/vm/utilities/debug.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/utilities/debug.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -653,6 +653,13 @@
   tty->print_cr("  pm(int pc)    - print Method* given compiled PC");
   tty->print_cr("  findm(intptr_t pc) - finds Method*");
   tty->print_cr("  find(intptr_t x)   - finds & prints nmethod/stub/bytecode/oop based on pointer into it");
+  tty->print_cr("  pns(void* sp, void* fp, void* pc)  - print native (i.e. mixed) stack trace. E.g.");
+  tty->print_cr("                   pns($sp, $rbp, $pc) on Linux/amd64 and Solaris/amd64 or");
+  tty->print_cr("                   pns($sp, $ebp, $pc) on Linux/x86 or");
+  tty->print_cr("                   pns($sp, 0, $pc)    on Linux/ppc64 or");
+  tty->print_cr("                   pns($sp + 0x7ff, 0, $pc) on Solaris/SPARC");
+  tty->print_cr("                 - in gdb do 'set overload-resolution off' before calling pns()");
+  tty->print_cr("                 - in dbx do 'frame 1' before calling pns()");
 
   tty->print_cr("misc.");
   tty->print_cr("  flush()       - flushes the log file");
@@ -665,3 +672,56 @@
 }
 
 #endif // !PRODUCT
+
+void print_native_stack(outputStream* st, frame fr, Thread* t, char* buf, int buf_size) {
+
+  // see if it's a valid frame
+  if (fr.pc()) {
+    st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
+
+    int count = 0;
+    while (count++ < StackPrintLimit) {
+      fr.print_on_error(st, buf, buf_size);
+      st->cr();
+      // Compiled code may use EBP register on x86 so it looks like
+      // non-walkable C frame. Use frame.sender() for java frames.
+      if (t && t->is_Java_thread()) {
+        // Catch very first native frame by using stack address.
+        // For JavaThread stack_base and stack_size should be set.
+        if (!t->on_local_stack((address)(fr.real_fp() + 1))) {
+          break;
+        }
+        if (fr.is_java_frame() || fr.is_native_frame() || fr.is_runtime_frame()) {
+          RegisterMap map((JavaThread*)t, false); // No update
+          fr = fr.sender(&map);
+        } else {
+          fr = os::get_sender_for_C_frame(&fr);
+        }
+      } else {
+        // is_first_C_frame() does only simple checks for frame pointer,
+        // it will pass if java compiled code has a pointer in EBP.
+        if (os::is_first_C_frame(&fr)) break;
+        fr = os::get_sender_for_C_frame(&fr);
+      }
+    }
+
+    if (count > StackPrintLimit) {
+      st->print_cr("...<more frames>...");
+    }
+
+    st->cr();
+  }
+}
+
+#ifndef PRODUCT
+
+extern "C" void pns(void* sp, void* fp, void* pc) { // print native stack
+  Command c("pns");
+  static char buf[O_BUFLEN];
+  Thread* t = ThreadLocalStorage::get_thread_slow();
+  // Call generic frame constructor (certain arguments may be ignored)
+  frame fr(sp, fp, pc);
+  print_native_stack(tty, fr, t, buf, sizeof(buf));
+}
+
+#endif // !PRODUCT
--- a/hotspot/src/share/vm/utilities/debug.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/utilities/debug.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -263,4 +263,7 @@
 void pd_ps(frame f);
 void pd_obfuscate_location(char *buf, size_t buflen);
 
+class outputStream;
+void print_native_stack(outputStream* st, frame fr, Thread* t, char* buf, int buf_size);
+
 #endif // SHARE_VM_UTILITIES_DEBUG_HPP
--- a/hotspot/src/share/vm/utilities/taskqueue.hpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/utilities/taskqueue.hpp	Fri Sep 26 01:40:31 2014 -0700
@@ -331,7 +331,7 @@
     //            index, &_elems[index], _elems[index]);
     E* t = (E*)&_elems[index];      // cast away volatility
     oop* p = (oop*)t;
-    assert((*t)->is_oop_or_null(), "Not an oop or null");
+    assert((*t)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(*t)));
     f->do_oop(p);
   }
   // tty->print_cr("END OopTaskQueue::oops_do");
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Fri Sep 26 01:40:31 2014 -0700
@@ -577,7 +577,7 @@
 
   STEP(120, "(printing native stack)" )
 
-     if (_verbose) {
+   if (_verbose) {
      if (os::platform_print_native_stack(st, _context, buf, sizeof(buf))) {
        // We have printed the native stack in platform-specific code
        // Windows/x64 needs special handling.
@@ -585,43 +585,7 @@
        frame fr = _context ? os::fetch_frame_from_context(_context)
                            : os::current_frame();
 
-       // see if it's a valid frame
-       if (fr.pc()) {
-          st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
-
-
-          int count = 0;
-          while (count++ < StackPrintLimit) {
-             fr.print_on_error(st, buf, sizeof(buf));
-             st->cr();
-             // Compiled code may use EBP register on x86 so it looks like
-             // non-walkable C frame. Use frame.sender() for java frames.
-             if (_thread && _thread->is_Java_thread()) {
-               // Catch very first native frame by using stack address.
-               // For JavaThread stack_base and stack_size should be set.
-               if (!_thread->on_local_stack((address)(fr.sender_sp() + 1))) {
-                 break;
-               }
-               if (fr.is_java_frame()) {
-                 RegisterMap map((JavaThread*)_thread, false); // No update
-                 fr = fr.sender(&map);
-               } else {
-                 fr = os::get_sender_for_C_frame(&fr);
-               }
-             } else {
-               // is_first_C_frame() does only simple checks for frame pointer,
-               // it will pass if java compiled code has a pointer in EBP.
-               if (os::is_first_C_frame(&fr)) break;
-               fr = os::get_sender_for_C_frame(&fr);
-             }
-          }
-
-          if (count > StackPrintLimit) {
-             st->print_cr("...<more frames>...");
-          }
-
-          st->cr();
-       }
+       print_native_stack(st, fr, _thread, buf, sizeof(buf));
      }
    }
 
--- a/hotspot/test/TEST.groups	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/test/TEST.groups	Fri Sep 26 01:40:31 2014 -0700
@@ -323,8 +323,243 @@
 hotspot_wbapitest = \
   sanity/
 
-hotspot_compiler = \
-  sanity/ExecuteInternalVMTests.java
+hotspot_compiler_1 = \
+  compiler/5057225/Test5057225.java \
+  compiler/5091921/Test5091921.java \
+  compiler/5091921/Test6186134.java \
+  compiler/5091921/Test6196102.java \
+  compiler/5091921/Test6357214.java \
+  compiler/5091921/Test6559156.java \
+  compiler/5091921/Test6753639.java \
+  compiler/5091921/Test6935022.java \
+  compiler/5091921/Test6959129.java \
+  compiler/5091921/Test6985295.java \
+  compiler/5091921/Test6992759.java \
+  compiler/5091921/Test7005594.java \
+  compiler/5091921/Test7020614.java \
+  compiler/6378821/Test6378821.java \
+  compiler/6431242/Test.java \
+  compiler/6443505/Test6443505.java \
+  compiler/6478991/NullCheckTest.java \
+  compiler/6539464/Test.java \
+  compiler/6579789/Test6579789.java \
+  compiler/6636138/ \
+  compiler/6646019/Test.java \
+  compiler/6659207/Test.java \
+  compiler/6661247/Test.java \
+  compiler/6663621/IVTest.java \
+  compiler/6689060/Test.java \
+  compiler/6695810/Test.java \
+  compiler/6700047/Test6700047.java \
+  compiler/6711100/Test.java \
+  compiler/6724218/Test.java \
+  compiler/6732154/Test6732154.java \
+  compiler/6758234/Test6758234.java \
+  compiler/6769124/ \
+  compiler/6772683/InterruptedTest.java \
+  compiler/6778657/Test.java \
+  compiler/6795161/Test.java \
+  compiler/6795362/Test6795362.java \
+  compiler/6795465/Test6795465.java \
+  compiler/6796786/Test6796786.java \
+  compiler/6799693/Test.java \
+  compiler/6805724/Test6805724.java \
+  compiler/6814842/Test6814842.java \
+  compiler/6823453/Test.java \
+  compiler/6833129/Test.java \
+  compiler/6837011/Test6837011.java \
+  compiler/6843752/Test.java \
+  compiler/6849574/Test.java \
+  compiler/6855164/Test.java \
+  compiler/6855215/Test6855215.java \
+  compiler/6857159/Test6857159.java \
+  compiler/6860469/Test.java \
+  compiler/6863155/Test6863155.java \
+  compiler/6863420/Test.java \
+  compiler/6865265/StackOverflowBug.java \
+  compiler/6879902/Test6879902.java \
+  compiler/6880034/Test6880034.java \
+  compiler/6891750/Test6891750.java \
+  compiler/6892265/Test.java \
+  compiler/6894807/IsInstanceTest.java \
+  compiler/6901572/Test.java \
+  compiler/6909839/Test6909839.java \
+  compiler/6910484/Test.java \
+  compiler/6910605/Test.java \
+  compiler/6910618/Test.java \
+  compiler/6916644/Test6916644.java \
+  compiler/6921969/TestMultiplyLongHiZero.java \
+  compiler/6930043/Test6930043.java \
+  compiler/6932496/Test6932496.java \
+  compiler/6956668/Test6956668.java \
+  compiler/6968348/Test6968348.java \
+  compiler/6973329/Test.java
+
+hotspot_compiler_2 = \
+  compiler/6982370/Test6982370.java \
+  compiler/7009231/Test7009231.java \
+  compiler/7009359/Test7009359.java \
+  compiler/7017746/Test.java \
+  compiler/7024475/Test7024475.java \
+  compiler/7041100/Test7041100.java \
+  compiler/7044738/Test7044738.java \
+  compiler/7046096/Test7046096.java \
+  compiler/7048332/Test7048332.java \
+  compiler/7068051/Test7068051.java \
+  compiler/7082949/Test7082949.java \
+  compiler/7088020/Test7088020.java \
+  compiler/7090976/Test7090976.java \
+  compiler/7103261/Test7103261.java \
+  compiler/7110586/Test7110586.java \
+  compiler/7119644/ \
+  compiler/7141637/SpreadNullArg.java \
+  compiler/7169782/Test7169782.java \
+  compiler/7174363/Test7174363.java \
+  compiler/7179138/ \
+  compiler/7190310/ \
+  compiler/7192963/ \
+  compiler/7200264/TestIntVect.java \
+  compiler/8000805/Test8000805.java \
+  compiler/8002069/Test8002069.java \
+  compiler/8004741/Test8004741.java \
+  compiler/8005033/Test8005033.java \
+  compiler/8005419/Test8005419.java \
+  compiler/8005956/PolynomialRoot.java \
+  compiler/8007294/Test8007294.java
+
+hotspot_compiler_3 = \
+  compiler/8007722/Test8007722.java \
+  compiler/8009761/Test8009761.java \
+  compiler/8010927/Test8010927.java \
+  compiler/8011706/Test8011706.java \
+  compiler/8011771/Test8011771.java \
+  compiler/8011901/Test8011901.java \
+  compiler/arraycopy/TestMissingControl.java \
+  compiler/ciReplay/TestVM_no_comp_level.sh \
+  compiler/classUnloading/anonymousClass/TestAnonymousClassUnloading.java \
+  compiler/codecache/CheckUpperLimit.java \
+  compiler/codegen/ \
+  compiler/cpuflags/RestoreMXCSR.java \
+  compiler/EscapeAnalysis/ \
+  compiler/exceptions/TestRecursiveReplacedException.java \
+  compiler/floatingpoint/ModNaN.java \
+  compiler/gcbarriers/G1CrashTest.java \
+  compiler/inlining/ \
+  compiler/IntegerArithmetic/TestIntegerComparison.java \
+  compiler/intrinsics/bmi/TestAndnI.java \
+  compiler/intrinsics/bmi/TestAndnI.java \
+  compiler/intrinsics/bmi/TestAndnL.java \
+  compiler/intrinsics/bmi/TestBlsiI.java \
+  compiler/intrinsics/bmi/TestBlsiL.java \
+  compiler/intrinsics/bmi/TestBlsmskI.java \
+  compiler/intrinsics/bmi/TestBlsmskL.java \
+  compiler/intrinsics/bmi/TestBlsrI.java \
+  compiler/intrinsics/bmi/TestBlsrL.java \
+  compiler/intrinsics/bmi/TestLzcntI.java \
+  compiler/intrinsics/bmi/TestLzcntL.java \
+  compiler/intrinsics/bmi/TestTzcntI.java \
+  compiler/intrinsics/bmi/TestTzcntL.java \
+  compiler/intrinsics/clone/TestObjectClone.java \
+  compiler/intrinsics/hashcode/TestHashCode.java \
+  compiler/intrinsics/mathexact/CompareTest.java \
+  compiler/intrinsics/mathexact/GVNTest.java \
+  compiler/intrinsics/mathexact/NegExactILoadTest.java \
+  compiler/intrinsics/mathexact/NegExactILoopDependentTest.java \
+  compiler/intrinsics/mathexact/NegExactINonConstantTest.java \
+  compiler/intrinsics/mathexact/SubExactICondTest.java \
+  compiler/intrinsics/mathexact/SubExactILoadTest.java \
+  compiler/intrinsics/mathexact/SubExactILoopDependentTest.java \
+  compiler/intrinsics/stringequals/TestStringEqualsBadLength.java \
+  compiler/intrinsics/unsafe/UnsafeGetAddressTest.java \
+  compiler/jsr292/ConcurrentClassLoadingTest.java \
+  compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java \
+  compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java \
+  compiler/loopopts/TestLogSum.java \
+  compiler/macronodes/TestEliminateAllocationPhi.java \
+  compiler/membars/TestMemBarAcquire.java \
+  compiler/osr/TestOSRWithNonEmptyStack.java \
+  compiler/profiling/TestMethodHandleInvokesIntrinsic.java \
+  compiler/profiling/TestSpecTrapClassUnloading.java \
+  compiler/profiling/TestUnexpectedProfilingMismatch.java \
+  compiler/regalloc/C1ObjectSpillInLogicOp.java \
+  compiler/startup/NumCompilerThreadsCheck.java \
+  compiler/startup/SmallCodeCacheStartup.java \
+  compiler/types/TestSpeculationFailedHigherEqual.java \
+  compiler/types/TypeSpeculation.java \
+  compiler/uncommontrap/StackOverflowGuardPagesOff.java \
+  compiler/uncommontrap/TestStackBangMonitorOwned.java \
+  compiler/uncommontrap/TestStackBangRbp.java \
+  compiler/unsafe/GetUnsafeObjectG1PreBarrier.java
+
+hotspot_compiler_closed = \
+  closed/compiler/4292742/Test.java \
+  closed/compiler/4474154/Test4474154.java \
+  closed/compiler/4482613/Test4482613.java \
+  closed/compiler/4490177/tctest.java \
+  closed/compiler/4495990/Application.java \
+  closed/compiler/4522874/Test4522874.sh \
+  closed/compiler/4629512/Test4629512.java \
+  closed/compiler/4647299/Looper.java \
+  closed/compiler/4655758/TestClass.java \
+  closed/compiler/4671453/LongCompTest.java \
+  closed/compiler/4671460/CharArrTest.java \
+  closed/compiler/4709105/StringTest2.java \
+  closed/compiler/4732721/Bug.java \
+  closed/compiler/4750681/ReadTest.java \
+  closed/compiler/4787943/LongCrash.java \
+  closed/compiler/4819903/Base64Test.java \
+  closed/compiler/4903383/Test.java \
+  closed/compiler/4906393/Test.java \
+  closed/compiler/4907999/Uidtest.java \
+  closed/compiler/4917709/Tester.java \
+  closed/compiler/4957832/Test.java \
+  closed/compiler/4965430/LoopTest.java \
+  closed/compiler/4979449/T4979449.java \
+  closed/compiler/5031274/Test.java \
+  closed/compiler/5043395/T5043395.java \
+  closed/compiler/5049410/Test.java \
+  closed/compiler/5098422/Test.java \
+  closed/compiler/6173783/Test.java \
+  closed/compiler/6272923/Test6272923.sh \
+  closed/compiler/6290963/Test.java \
+  closed/compiler/6305546/Test.java \
+  closed/compiler/6309806/Test.java \
+  closed/compiler/6311859/Test.java \
+  closed/compiler/6321689/Test.java \
+  closed/compiler/6326935/Test.java \
+  closed/compiler/6367889/Test.java \
+  closed/compiler/6371167/Test.java \
+  closed/compiler/6389127/Test.java \
+  closed/compiler/6397650/Test.java \
+  closed/compiler/6414932/Test.java \
+  closed/compiler/6421619/Test_6421619.java \
+  closed/compiler/6427750/UnsafeVolatile.java \
+  closed/compiler/6431243/Test.java \
+  closed/compiler/6433572/TestSyncJSR.java \
+  closed/compiler/6433840/clinit.java \
+  closed/compiler/6457854/Test.java \
+  closed/compiler/6476804/Test.java \
+  closed/compiler/6512111/CorruptFinalLong.java \
+  closed/compiler/6551887/Test.java \
+  closed/compiler/6571539/Test.java \
+  closed/compiler/6587132/Test.java \
+  closed/compiler/6588045/Test.java \
+  closed/compiler/6588598/etype.java \
+  closed/compiler/6661918/Test6661918.java \
+  closed/compiler/6707044/Test.java \
+  closed/compiler/6730716/Test.java \
+  closed/compiler/6772368/Test6772368.sh \
+  closed/compiler/6897150/Test6897150.java \
+  closed/compiler/6931567/Test6931567.java \
+  closed/compiler/7196857/Test7196857.java \
+  closed/compiler/8009699/Test8009699.java \
+  closed/compiler/8009699/Test8009699B.java \
+  closed/compiler/8014811/Test8014811.java \
+  closed/compiler/8029507/InvokePrivate.java \
+  closed/compiler/callingConvention/Arg9Double.java \
+  closed/compiler/deoptimization/DeoptArithmetic.java \
+  closed/compiler/deoptimization/TestDoubleLocals.java \
+  closed/compiler/deoptimization/TestDoubleMerge.java
 
 hotspot_gc = \
   sanity/ExecuteInternalVMTests.java
@@ -343,7 +578,7 @@
  -runtime/SharedArchiveFile/CdsSameObjectAlignment.java \
  -runtime/SharedArchiveFile/DefaultUseWithClient.java \
  -runtime/Thread/CancellableThreadTest.java \
- -runtime/runtime/7158988/FieldMonitor.java
+ -runtime/7158988/FieldMonitor.java
 
 hotspot_runtime_closed = \
   sanity/ExecuteInternalVMTests.java
@@ -352,7 +587,10 @@
   sanity/ExecuteInternalVMTests.java
 
 hotspot_all = \
-  :hotspot_compiler \
+  :hotspot_compiler_1 \
+  :hotspot_compiler_2 \
+  :hotspot_compiler_3 \
+  :hotspot_compiler_closed \
   :hotspot_gc \
   :hotspot_runtime \
   :hotspot_serviceability
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/codecache/CheckSegmentedCodeCache.java	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.*;
+
+/*
+ * @test CheckSegmentedCodeCache
+ * @bug 8015774
+ * @summary "Checks VM options related to the segmented code cache"
+ * @library /testlibrary
+ * @run main/othervm CheckSegmentedCodeCache
+ */
+public class CheckSegmentedCodeCache {
+  // Code heap names
+  private static final String NON_METHOD = "CodeHeap 'non-methods'";
+  private static final String PROFILED = "CodeHeap 'profiled nmethods'";
+  private static final String NON_PROFILED = "CodeHeap 'non-profiled nmethods'";
+
+  private static void verifySegmentedCodeCache(ProcessBuilder pb, boolean enabled) throws Exception {
+    OutputAnalyzer out = new OutputAnalyzer(pb.start());
+    if (enabled) {
+      try {
+        // The non-method code heap should always be available with the segmented code cache
+        out.shouldContain(NON_METHOD);
+      } catch (RuntimeException e) {
+        // TieredCompilation is disabled in a client VM
+        out.shouldContain("TieredCompilation is disabled in this release.");
+      }
+    } else {
+      out.shouldNotContain(NON_METHOD);
+    }
+    out.shouldHaveExitValue(0);
+  }
+
+  private static void verifyCodeHeapNotExists(ProcessBuilder pb, String... heapNames) throws Exception {
+    OutputAnalyzer out = new OutputAnalyzer(pb.start());
+    for (String name : heapNames) {
+      out.shouldNotContain(name);
+    }
+  }
+
+  private static void failsWith(ProcessBuilder pb, String message) throws Exception {
+    OutputAnalyzer out = new OutputAnalyzer(pb.start());
+    out.shouldContain(message);
+    out.shouldHaveExitValue(1);
+  }
+
+  /**
+   * Check the effect of the VM options related to the segmented code cache.
+   */
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb;
+
+    // Disabled with ReservedCodeCacheSize < 240MB
+    pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=239m",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifySegmentedCodeCache(pb, false);
+
+    // Disabled without TieredCompilation
+    pb = ProcessTools.createJavaProcessBuilder("-XX:-TieredCompilation",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifySegmentedCodeCache(pb, false);
+
+    // Enabled with TieredCompilation and ReservedCodeCacheSize >= 240MB
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+TieredCompilation",
+                                               "-XX:ReservedCodeCacheSize=240m",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifySegmentedCodeCache(pb, true);
+
+    // Always enabled if SegmentedCodeCache is set
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
+                                               "-XX:-TieredCompilation",
+                                               "-XX:ReservedCodeCacheSize=239m",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifySegmentedCodeCache(pb, true);
+
+    // The profiled and non-profiled code heaps should not be available in
+    // interpreter-only mode
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
+                                               "-Xint",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifyCodeHeapNotExists(pb, PROFILED, NON_PROFILED);
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
+                                               "-XX:TieredStopAtLevel=0",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifyCodeHeapNotExists(pb, PROFILED, NON_PROFILED);
+
+    // If we stop compilation at CompLevel_simple (-XX:TieredStopAtLevel=1), the profiled code heap is not needed
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
+                                               "-XX:TieredStopAtLevel=1",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifyCodeHeapNotExists(pb, PROFILED);
+
+    // Fails with too small non-method code heap size
+    pb = ProcessTools.createJavaProcessBuilder("-XX:NonMethodCodeHeapSize=100K");
+    failsWith(pb, "Invalid NonMethodCodeHeapSize");
+
+    // Fails if code heap sizes do not add up
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
+                                               "-XX:ReservedCodeCacheSize=10M",
+                                               "-XX:NonMethodCodeHeapSize=5M",
+                                               "-XX:ProfiledCodeHeapSize=5M",
+                                               "-XX:NonProfiledCodeHeapSize=5M");
+    failsWith(pb, "Invalid code heap sizes");
+
+    // Fails if not enough space for VM internal code
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
+                                               "-XX:ReservedCodeCacheSize=1700K",
+                                               "-XX:InitialCodeCacheSize=100K");
+    failsWith(pb, "Not enough space in non-method code heap to run VM");
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/multiplytolen/TestMultiplyToLenReturnProfile.java	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8057758
+ * @summary MultiplyToLen sets its return type to have a bottom offset which confuses code generation
+ * @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TypeProfileLevel=222 TestMultiplyToLenReturnProfile
+ *
+ */
+
+
+import java.math.*;
+
+public class TestMultiplyToLenReturnProfile {
+
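+    // Repeatedly multiplies large BigIntegers so that m() gets compiled; together with
+    // -XX:TypeProfileLevel=222 this exercises the return type of the multiplyToLen
+    // intrinsic (see the @summary above).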
+    static BigInteger m(BigInteger i1, BigInteger i2) {
+        BigInteger res = BigInteger.valueOf(0);
+        for (int i = 0; i < 100; i++) {
+            res = res.add(i1.multiply(i2)); // accumulate (BigInteger is immutable)
+        }
+        return res;
+    }
+
+    static public void main(String[] args) {
+        BigInteger v = BigInteger.valueOf(Integer.MAX_VALUE).pow(2);
+        for (int i = 0; i < 20000; i++) {
+            m(v, v.add(BigInteger.valueOf(1)));
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/unsafe/UnsafeRaw.java	Fri Sep 26 01:40:31 2014 -0700
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8058744
+ * @summary Invalid pattern-matching of address computations in raw unsafe
+ * @library /testlibrary
+ * @run main/othervm -Xbatch UnsafeRaw
+ */
+
+import com.oracle.java.testlibrary.Utils;
+import java.util.Random;
+
+public class UnsafeRaw {
+  public static class Tests {
+    public static int int_index(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
+      return unsafe.getInt(base + (index << 2));
+    }
+    public static int long_index(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
+      return unsafe.getInt(base + (index << 2));
+    }
+    public static int int_index_back_ashift(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
+      return unsafe.getInt(base + (index >> 2));
+    }
+    public static int int_index_back_lshift(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
+      return unsafe.getInt(base + (index >>> 2));
+    }
+    public static int long_index_back_ashift(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
+      return unsafe.getInt(base + (index >> 2));
+    }
+    public static int long_index_back_lshift(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
+      return unsafe.getInt(base + (index >>> 2));
+    }
+    public static int int_const_12345678_index(sun.misc.Unsafe unsafe, long base) throws Exception {
+      int idx4 = 0x12345678;
+      return unsafe.getInt(base + idx4);
+    }
+    public static int long_const_1234567890abcdef_index(sun.misc.Unsafe unsafe, long base) throws Exception {
+      long idx5 = 0x1234567890abcdefL;
+      return unsafe.getInt(base + idx5);
+    }
+    public static int int_index_mul(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
+      return unsafe.getInt(base + (index * 4));
+    }
+    public static int long_index_mul(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
+      return unsafe.getInt(base + (index * 4));
+    }
+    public static int int_index_mul_scale_16(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
+      return unsafe.getInt(base + (index * 16));
+    }
+    public static int long_index_mul_scale_16(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
+      return unsafe.getInt(base + (index * 16));
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    sun.misc.Unsafe unsafe = Utils.getUnsafe();
+    final int array_size = 128;
+    final int element_size = 4;
+    final int magic = 0x12345678;
+
+    Random rnd = new Random();
+
+    long array = unsafe.allocateMemory(array_size * element_size); // 128 ints
+    long addr = array + array_size * element_size / 2; // something in the middle to work with
+    unsafe.putInt(addr, magic);
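+    // Each block below picks an index and derives a compensating base address so that
+    // the scaled access in Tests.* (base + shifted or multiplied index) resolves back
+    // to 'addr', where 'magic' was stored.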
+    for (int j = 0; j < 100000; j++) {
+       if (Tests.int_index(unsafe, addr, 0) != magic) throw new Exception();
+       if (Tests.long_index(unsafe, addr, 0) != magic) throw new Exception();
+       if (Tests.int_index_mul(unsafe, addr, 0) != magic) throw new Exception();
+       if (Tests.long_index_mul(unsafe, addr, 0) != magic) throw new Exception();
+       {
+         long idx1 = rnd.nextLong();
+         long addr1 = addr - (idx1 << 2);
+         if (Tests.long_index(unsafe, addr1, idx1) != magic) throw new Exception();
+       }
+       {
+         long idx2 = rnd.nextLong();
+         long addr2 = addr - (idx2 >> 2);
+         if (Tests.long_index_back_ashift(unsafe, addr2, idx2) != magic) throw new Exception();
+       }
+       {
+         long idx3 = rnd.nextLong();
+         long addr3 = addr - (idx3 >>> 2);
+         if (Tests.long_index_back_lshift(unsafe, addr3, idx3) != magic) throw new Exception();
+       }
+       {
+         long idx4 = 0x12345678;
+         long addr4 = addr - idx4;
+         if (Tests.int_const_12345678_index(unsafe, addr4) != magic) throw new Exception();
+       }
+       {
+         long idx5 = 0x1234567890abcdefL;
+         long addr5 = addr - idx5;
+         if (Tests.long_const_1234567890abcdef_index(unsafe, addr5) != magic) throw new Exception();
+       }
+       {
+         int idx6 = rnd.nextInt();
+         long addr6 = addr - (idx6 >> 2);
+         if (Tests.int_index_back_ashift(unsafe, addr6, idx6) != magic) throw new Exception();
+       }
+       {
+         int idx7 = rnd.nextInt();
+         long addr7 = addr - (idx7 >>> 2);
+         if (Tests.int_index_back_lshift(unsafe, addr7, idx7) != magic) throw new Exception();
+       }
+       {
+         int idx8 = rnd.nextInt();
+         long addr8 = addr - (idx8 * 16);
+         if (Tests.int_index_mul_scale_16(unsafe, addr8, idx8) != magic) throw new Exception();
+       }
+       {
+         long idx9 = rnd.nextLong();
+         long addr9 = addr - (idx9 * 16);
+         if (Tests.long_index_mul_scale_16(unsafe, addr9, idx9) != magic) throw new Exception();
+       }
+    }
+  }
+}
--- a/hotspot/test/runtime/CompressedOops/UseCompressedOops.java	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/test/runtime/CompressedOops/UseCompressedOops.java	Fri Sep 26 01:40:31 2014 -0700
@@ -52,18 +52,17 @@
                 .shouldContain("Compressed Oops mode")
                 .shouldHaveExitValue(0);
 
-            // Larger than 4gb heap should result in zero based with shift 3
-            testCompressedOops("-XX:+UseCompressedOops", "-Xmx5g")
-                .shouldContain("Zero based")
-                .shouldContain("Oop shift amount: 3")
-                .shouldHaveExitValue(0);
+            // Skip the following test cases if we're on OSX or Solaris.
+            //
+            // OSX doesn't seem to care about HeapBaseMinAddress and Solaris
+            // puts the heap way up, forcing different behaviour.
+            if (!Platform.isOSX() && !Platform.isSolaris()) {
+                // Larger than 4gb heap should result in zero based with shift 3
+                testCompressedOops("-XX:+UseCompressedOops", "-Xmx5g")
+                    .shouldContain("Zero based")
+                    .shouldContain("Oop shift amount: 3")
+                    .shouldHaveExitValue(0);
 
-            // Skip the following three test cases if we're on OSX or Solaris Sparc.
-            //
-            // OSX doesn't seem to care about HeapBaseMinAddress and Solaris Sparc
-            // puts the heap way up, forcing different behaviour.
-
-            if (!Platform.isOSX() && !(Platform.isSolaris() && Platform.isSparc())) {
                 // Small heap above 4gb should result in zero based with shift 3
                 testCompressedOops("-XX:+UseCompressedOops", "-Xmx32m", "-XX:HeapBaseMinAddress=4g")
                     .shouldContain("Zero based")
@@ -83,6 +82,12 @@
                     .shouldContain("Non-zero based")
                     .shouldContain("Oop shift amount: 4")
                     .shouldHaveExitValue(0);
+
+                // 32gb heap with object alignment set to 16 bytes should result in zero based with shift 4
+                testCompressedOops("-XX:+UseCompressedOops", "-Xmx32g", "-XX:ObjectAlignmentInBytes=16")
+                    .shouldContain("Zero based")
+                    .shouldContain("Oop shift amount: 4")
+                    .shouldHaveExitValue(0);
             }
 
             // Explicitly enabling compressed oops with 32gb heap should result a warning
@@ -106,12 +111,6 @@
                 .shouldContain("Max heap size too large for Compressed Oops")
                 .shouldHaveExitValue(0);
 
-            // 32gb heap with object alignment set to 16 bytes should result in zero based with shift 4
-            testCompressedOops("-XX:+UseCompressedOops", "-Xmx32g", "-XX:ObjectAlignmentInBytes=16")
-                .shouldContain("Zero based")
-                .shouldContain("Oop shift amount: 4")
-                .shouldHaveExitValue(0);
-
         } else {
             // Compressed oops should only apply to 64bit platforms
             testCompressedOops("-XX:+UseCompressedOops", "-Xmx32m")
--- a/hotspot/test/serviceability/dcmd/CodeCacheTest.java	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/test/serviceability/dcmd/CodeCacheTest.java	Fri Sep 26 01:40:31 2014 -0700
@@ -25,7 +25,8 @@
  * @test CodeCacheTest
  * @bug 8054889
  * @build DcmdUtil CodeCacheTest
- * @run main CodeCacheTest
+ * @run main/othervm -XX:+SegmentedCodeCache CodeCacheTest
+ * @run main/othervm -XX:-SegmentedCodeCache CodeCacheTest
  * @summary Test of diagnostic command Compiler.codecache
  */
 
@@ -39,67 +40,105 @@
 
     /**
      * This test calls Jcmd (diagnostic command tool) Compiler.codecache and then parses the output,
-     * making sure that all number look ok
+     * making sure that all numbers look ok
      *
      *
-     * Expected output:
+     * Expected output without code cache segmentation:
      *
      * CodeCache: size=245760Kb used=4680Kb max_used=4680Kb free=241079Kb
      * bounds [0x00007f5bd9000000, 0x00007f5bd94a0000, 0x00007f5be8000000]
      * total_blobs=575 nmethods=69 adapters=423
      * compilation: enabled
+     *
+     * Expected output with code cache segmentation (number of segments may change):
+     *
+     * CodeHeap 'non-methods': size=5696Kb used=2236Kb max_used=2238Kb free=3459Kb
+     *  bounds [0x00007fa0f0ffe000, 0x00007fa0f126e000, 0x00007fa0f158e000]
+     * CodeHeap 'profiled nmethods': size=120036Kb used=8Kb max_used=8Kb free=120027Kb
+     *  bounds [0x00007fa0f158e000, 0x00007fa0f17fe000, 0x00007fa0f8ac7000]
+     * CodeHeap 'non-profiled nmethods': size=120036Kb used=2Kb max_used=2Kb free=120034Kb
+     *  bounds [0x00007fa0f8ac7000, 0x00007fa0f8d37000, 0x00007fa100000000]
+     * total_blobs=486 nmethods=8 adapters=399
+     * compilation: enabled
      */
 
-    static Pattern line1 = Pattern.compile("CodeCache: size=(\\p{Digit}*)Kb used=(\\p{Digit}*)Kb max_used=(\\p{Digit}*)Kb free=(\\p{Digit}*)Kb");
+    static Pattern line1 = Pattern.compile("(CodeCache|CodeHeap.*): size=(\\p{Digit}*)Kb used=(\\p{Digit}*)Kb max_used=(\\p{Digit}*)Kb free=(\\p{Digit}*)Kb");
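+    // In line1, group 1 matches "CodeCache" or the code heap name; groups 2-5 hold size, used, max_used and free (in Kb).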
     static Pattern line2 = Pattern.compile(" bounds \\[0x(\\p{XDigit}*), 0x(\\p{XDigit}*), 0x(\\p{XDigit}*)\\]");
     static Pattern line3 = Pattern.compile(" total_blobs=(\\p{Digit}*) nmethods=(\\p{Digit}*) adapters=(\\p{Digit}*)");
-    static Pattern line4 = Pattern.compile(" compilation: (\\w*)");
+    static Pattern line4 = Pattern.compile(" compilation: (.*)");
+
+    private static boolean getFlagBool(String flag, String where) {
+      Matcher m = Pattern.compile(flag + "\\s+:?= (true|false)").matcher(where);
+      if (!m.find()) {
+        throw new RuntimeException("Could not find value for flag " + flag + " in output string");
+      }
+      return m.group(1).equals("true");
+    }
+
+    private static int getFlagInt(String flag, String where) {
+      Matcher m = Pattern.compile(flag + "\\s+:?=\\s+\\d+").matcher(where);
+      if (!m.find()) {
+        throw new RuntimeException("Could not find value for flag " + flag + " in output string");
+      }
+      String match = m.group();
+      return Integer.parseInt(match.substring(match.lastIndexOf(" ") + 1, match.length()));
+    }
 
     public static void main(String arg[]) throws Exception {
+        // Get number of code cache segments
+        int segmentsCount = 0;
+        String flags = DcmdUtil.executeDcmd("VM.flags", "-all");
+        if (!getFlagBool("SegmentedCodeCache", flags) || !getFlagBool("UseCompiler", flags)) {
+          // No segmentation
+          segmentsCount = 1;
+        } else if (getFlagBool("TieredCompilation", flags) && getFlagInt("TieredStopAtLevel", flags) > 1) {
+          // Tiered compilation: use all segments
+          segmentsCount = 3;
+        } else {
+          // No TieredCompilation: only non-method and non-profiled segment
+          segmentsCount = 2;
+        }
 
         // Get output from dcmd (diagnostic command)
         String result = DcmdUtil.executeDcmd("Compiler.codecache");
         BufferedReader r = new BufferedReader(new StringReader(result));
 
-        // Validate first line
+        // Validate code cache segments
         String line;
-        line = r.readLine();
-        Matcher m = line1.matcher(line);
-        if (m.matches()) {
-            for(int i = 1; i <= 4; i++) {
-                int val = Integer.parseInt(m.group(i));
-                if (val < 0) {
-                    throw new Exception("Failed parsing dcmd codecache output");
-                }
-            }
-        } else {
-            throw new Exception("Regexp 1 failed");
-        }
+        Matcher m;
+        for (int s = 0; s < segmentsCount; ++s) {
+          // Validate first line
+          line = r.readLine();
+          m = line1.matcher(line);
+          if (m.matches()) {
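+              // Skip group 1 (the cache/heap name); the numeric fields are in groups 2-5.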
+              for (int i = 2; i <= 5; i++) {
+                  int val = Integer.parseInt(m.group(i));
+                  if (val < 0) {
+                      throw new Exception("Failed parsing dcmd codecache output");
+                  }
+              }
+          } else {
+              throw new Exception("Regexp 1 failed");
+          }
 
-        // Validate second line
-        line = r.readLine();
-        m = line2.matcher(line);
-        if (m.matches()) {
-            long start = Long.parseLong(m.group(1), 16);
-            if (start < 0) {
-                throw new Exception("Failed parsing dcmd codecache output");
-            }
-            long mark = Long.parseLong(m.group(2), 16);
-            if (mark < 0) {
-                throw new Exception("Failed parsing dcmd codecache output");
-            }
-            long top = Long.parseLong(m.group(3), 16);
-            if (top < 0) {
-                throw new Exception("Failed parsing dcmd codecache output");
-            }
-            if (start > mark) {
-                throw new Exception("Failed parsing dcmd codecache output");
-            }
-            if (mark > top) {
-                throw new Exception("Failed parsing dcmd codecache output");
-            }
-        } else {
-            throw new Exception("Regexp 2 failed line: " + line);
+          // Validate second line
+          line = r.readLine();
+          m = line2.matcher(line);
+          if (m.matches()) {
+              String start = m.group(1);
+              String mark  = m.group(2);
+              String top   = m.group(3);
+
+              // Lexically compare the hex addresses to sanity-check that start <= mark <= top.
+              if (start.compareTo(mark) > 0) {
+                  throw new Exception("Failed parsing dcmd codecache output");
+              }
+              if (mark.compareTo(top) > 0) {
+                  throw new Exception("Failed parsing dcmd codecache output");
+              }
+          } else {
+              throw new Exception("Regexp 2 failed line: " + line);
+          }
         }
 
         // Validate third line
@@ -111,7 +150,7 @@
                 throw new Exception("Failed parsing dcmd codecache output");
             }
             int nmethods = Integer.parseInt(m.group(2));
-            if (nmethods <= 0) {
+            if (nmethods < 0) {
                 throw new Exception("Failed parsing dcmd codecache output");
             }
             int adapters = Integer.parseInt(m.group(3));
@@ -128,11 +167,7 @@
         // Validate fourth line
         line = r.readLine();
         m = line4.matcher(line);
-        if (m.matches()) {
-            if (!m.group(1).equals("enabled")) {
-                throw new Exception("Invalid message: '" + m.group(1) + "'");
-            }
-        } else {
+        if (!m.matches()) {
             throw new Exception("Regexp 4 failed");
         }
     }
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Jul 05 20:02:40 2017 +0200
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Sep 26 01:40:31 2014 -0700
@@ -30,6 +30,7 @@
 import java.util.function.Function;
 import java.util.stream.Stream;
 import java.security.BasicPermission;
+
 import sun.hotspot.parser.DiagnosticCommand;
 
 public class WhiteBox {
@@ -168,6 +169,12 @@
   // CPU features
   public native String getCPUFeatures();
 
+  // Native extensions
+  public native long getHeapUsageForContext(int context);
+  public native long getHeapRegionCountForContext(int context);
+  public native int getContextForObject(Object obj);
+  public native void printRegionInfo(int context);
+
   // VM flags
   public native void    setBooleanVMFlag(String name, boolean value);
   public native void    setIntxVMFlag(String name, long value);