8015774: Add support for multiple code heaps
author thartmann
Wed, 17 Sep 2014 08:00:07 +0200
changeset 26796 666464578742
parent 26706 1e985d72c57f
child 26797 b89b8ffec1f0
8015774: Add support for multiple code heaps
Summary: Support for segmentation of the code cache. Separate code heaps are created and used to store code of different types.
Reviewed-by: kvn, iveresov, roland, anoll, egahlin, sla
hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java
hotspot/make/solaris/makefiles/mapfile-vers-COMPILER1
hotspot/make/solaris/makefiles/mapfile-vers-COMPILER2
hotspot/make/solaris/makefiles/mapfile-vers-TIERED
hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp
hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp
hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp
hotspot/src/cpu/x86/vm/c1_globals_x86.hpp
hotspot/src/cpu/x86/vm/c2_globals_x86.hpp
hotspot/src/cpu/zero/vm/shark_globals_zero.hpp
hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp
hotspot/src/os/bsd/dtrace/jhelper.d
hotspot/src/os/bsd/dtrace/libjvm_db.c
hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp
hotspot/src/os/solaris/dtrace/jhelper.d
hotspot/src/os/solaris/dtrace/libjvm_db.c
hotspot/src/share/vm/c1/c1_Compiler.cpp
hotspot/src/share/vm/c1/c1_Compiler.hpp
hotspot/src/share/vm/ci/ciEnv.cpp
hotspot/src/share/vm/code/codeBlob.cpp
hotspot/src/share/vm/code/codeBlob.hpp
hotspot/src/share/vm/code/codeCache.cpp
hotspot/src/share/vm/code/codeCache.hpp
hotspot/src/share/vm/code/nmethod.cpp
hotspot/src/share/vm/code/nmethod.hpp
hotspot/src/share/vm/code/vtableStubs.cpp
hotspot/src/share/vm/compiler/compileBroker.cpp
hotspot/src/share/vm/compiler/compileBroker.hpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
hotspot/src/share/vm/memory/heap.cpp
hotspot/src/share/vm/memory/heap.hpp
hotspot/src/share/vm/opto/c2compiler.cpp
hotspot/src/share/vm/opto/c2compiler.hpp
hotspot/src/share/vm/opto/compile.cpp
hotspot/src/share/vm/opto/output.cpp
hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp
hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp
hotspot/src/share/vm/runtime/arguments.cpp
hotspot/src/share/vm/runtime/fprofiler.cpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/init.cpp
hotspot/src/share/vm/runtime/sharedRuntime.cpp
hotspot/src/share/vm/runtime/sweeper.cpp
hotspot/src/share/vm/runtime/sweeper.hpp
hotspot/src/share/vm/runtime/vmStructs.cpp
hotspot/src/share/vm/services/memoryService.cpp
hotspot/src/share/vm/services/memoryService.hpp
hotspot/src/share/vm/trace/trace.xml
hotspot/src/share/vm/trace/tracetypes.xml
hotspot/test/compiler/codecache/CheckSegmentedCodeCache.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Wed Sep 17 08:00:07 2014 +0200
@@ -32,12 +32,10 @@
 import sun.jvm.hotspot.utilities.*;
 
 public class CodeCache {
-  private static AddressField       heapField;
-  private static AddressField       scavengeRootNMethodsField;
+  private static GrowableArray<CodeHeap> heapArray;
+  private static AddressField scavengeRootNMethodsField;
   private static VirtualConstructor virtualConstructor;
 
-  private CodeHeap heap;
-
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -49,7 +47,10 @@
   private static synchronized void initialize(TypeDataBase db) {
     Type type = db.lookupType("CodeCache");
 
-    heapField = type.getAddressField("_heap");
+    // Get array of CodeHeaps
+    AddressField heapsField = type.getAddressField("_heaps");
+    heapArray = GrowableArray.create(heapsField.getValue(), new StaticBaseConstructor<CodeHeap>(CodeHeap.class));
+
     scavengeRootNMethodsField = type.getAddressField("_scavenge_root_nmethods");
 
     virtualConstructor = new VirtualConstructor(db);
@@ -67,16 +68,17 @@
     }
   }
 
-  public CodeCache() {
-    heap = (CodeHeap) VMObjectFactory.newObject(CodeHeap.class, heapField.getValue());
-  }
-
   public NMethod scavengeRootMethods() {
     return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootNMethodsField.getValue());
   }
 
   public boolean contains(Address p) {
-    return getHeap().contains(p);
+    for (int i = 0; i < heapArray.length(); ++i) {
+      if (heapArray.at(i).contains(p)) {
+        return true;
+      }
+    }
+    return false;
   }
 
   /** When VM.getVM().isDebugging() returns true, this behaves like
@@ -97,14 +99,24 @@
 
   public CodeBlob findBlobUnsafe(Address start) {
     CodeBlob result = null;
+    CodeHeap containing_heap = null;
+    for (int i = 0; i < heapArray.length(); ++i) {
+      if (heapArray.at(i).contains(start)) {
+        containing_heap = heapArray.at(i);
+        break;
+      }
+    }
+    if (containing_heap == null) {
+      return null;
+    }
 
     try {
-      result = (CodeBlob) virtualConstructor.instantiateWrapperFor(getHeap().findStart(start));
+      result = (CodeBlob) virtualConstructor.instantiateWrapperFor(containing_heap.findStart(start));
     }
     catch (WrongTypeException wte) {
       Address cbAddr = null;
       try {
-        cbAddr = getHeap().findStart(start);
+        cbAddr = containing_heap.findStart(start);
       }
       catch (Exception findEx) {
         findEx.printStackTrace();
@@ -167,31 +179,32 @@
   }
 
   public void iterate(CodeCacheVisitor visitor) {
-    CodeHeap heap = getHeap();
-    Address ptr = heap.begin();
-    Address end = heap.end();
-
-    visitor.prologue(ptr, end);
+    visitor.prologue(lowBound(), highBound());
     CodeBlob lastBlob = null;
-    while (ptr != null && ptr.lessThan(end)) {
-      try {
-        // Use findStart to get a pointer inside the blob, otherwise findBlob asserts
-        CodeBlob blob = findBlobUnsafe(heap.findStart(ptr));
-        if (blob != null) {
-          visitor.visit(blob);
-          if (blob == lastBlob) {
-            throw new InternalError("saw same blob twice");
+
+    for (int i = 0; i < heapArray.length(); ++i) {
+      CodeHeap current_heap = heapArray.at(i);
+      Address ptr = current_heap.begin();
+      while (ptr != null && ptr.lessThan(current_heap.end())) {
+        try {
+          // Use findStart to get a pointer inside the blob, otherwise findBlob asserts
+          CodeBlob blob = findBlobUnsafe(current_heap.findStart(ptr));
+          if (blob != null) {
+            visitor.visit(blob);
+            if (blob == lastBlob) {
+              throw new InternalError("saw same blob twice");
+            }
+            lastBlob = blob;
           }
-          lastBlob = blob;
+        } catch (RuntimeException e) {
+          e.printStackTrace();
         }
-      } catch (RuntimeException e) {
-        e.printStackTrace();
+        Address next = current_heap.nextBlock(ptr);
+        if (next != null && next.lessThan(ptr)) {
+          throw new InternalError("pointer moved backwards");
+        }
+        ptr = next;
       }
-      Address next = heap.nextBlock(ptr);
-      if (next != null && next.lessThan(ptr)) {
-        throw new InternalError("pointer moved backwards");
-      }
-      ptr = next;
     }
     visitor.epilogue();
   }
@@ -200,7 +213,23 @@
   // Internals only below this point
   //
 
-  private CodeHeap getHeap() {
-    return heap;
+  private Address lowBound() {
+    Address low = heapArray.at(0).begin();
+    for (int i = 1; i < heapArray.length(); ++i) {
+      if (heapArray.at(i).begin().lessThan(low)) {
+        low = heapArray.at(i).begin();
+      }
+    }
+    return low;
+  }
+
+  private Address highBound() {
+    Address high = heapArray.at(0).end();
+    for (int i = 1; i < heapArray.length(); ++i) {
+      if (heapArray.at(i).end().greaterThan(high)) {
+        high = heapArray.at(i).end();
+      }
+    }
+    return high;
   }
 }
--- a/hotspot/make/solaris/makefiles/mapfile-vers-COMPILER1	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/make/solaris/makefiles/mapfile-vers-COMPILER1	Wed Sep 17 08:00:07 2014 +0200
@@ -29,7 +29,7 @@
 SUNWprivate_1.1 {
         global:
                 # Dtrace support
-                __1cJCodeCacheF_heap_;
+                __1cJCodeCacheG_heaps_;
                 __1cIUniverseO_collectedHeap_;
                 __1cGMethodG__vtbl_;
                 __1cHnmethodG__vtbl_;
--- a/hotspot/make/solaris/makefiles/mapfile-vers-COMPILER2	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/make/solaris/makefiles/mapfile-vers-COMPILER2	Wed Sep 17 08:00:07 2014 +0200
@@ -29,7 +29,7 @@
 SUNWprivate_1.1 {
         global:
                 # Dtrace support
-                __1cJCodeCacheF_heap_;
+                __1cJCodeCacheG_heaps_;
                 __1cIUniverseO_collectedHeap_;
                 __1cGMethodG__vtbl_;
                 __1cHnmethodG__vtbl_;
--- a/hotspot/make/solaris/makefiles/mapfile-vers-TIERED	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/make/solaris/makefiles/mapfile-vers-TIERED	Wed Sep 17 08:00:07 2014 +0200
@@ -29,7 +29,7 @@
 SUNWprivate_1.1 {
         global:
                 # Dtrace support
-                __1cJCodeCacheF_heap_;
+                __1cJCodeCacheG_heaps_;
                 __1cIUniverseO_collectedHeap_;
                 __1cGMethodG__vtbl_;
                 __1cHnmethodG__vtbl_;
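
All three Solaris mapfiles export the same VM-internal symbol to the DTrace ustack helper, so they change in lockstep: the field CodeCache::_heap (a CodeHeap*) becomes CodeCache::_heaps (a GrowableArray<CodeHeap*>*, see the codeCache.cpp hunk below), and the exported mangled name follows. In the Sun Studio mangling scheme, as I read it, each name component is prefixed by a capital letter encoding its length (A=0, B=1, ...):

    // Decoding the exported symbols above (illustration, not changeset code):
    //   __1c J CodeCache F _heap_    ->  CodeCache::_heap    (J=9, F=5)
    //   __1c J CodeCache G _heaps_   ->  CodeCache::_heaps   (J=9, G=6)
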
--- a/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -79,6 +79,9 @@
 
 define_pd_global(intx, InitialCodeCacheSize,         2048*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, ReservedCodeCacheSize,        256*M);
+define_pd_global(intx, NonProfiledCodeHeapSize,      125*M);
+define_pd_global(intx, ProfiledCodeHeapSize,         126*M);
+define_pd_global(intx, NonMethodCodeHeapSize,        5*M  );
 define_pd_global(intx, CodeCacheExpansionSize,       64*K);
 
 // Ergonomics related flags
--- a/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -47,6 +47,9 @@
 define_pd_global(intx, FreqInlineSize,               325  );
 define_pd_global(bool, ResizeTLAB,                   true );
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
+define_pd_global(intx, NonProfiledCodeHeapSize,      13*M );
+define_pd_global(intx, ProfiledCodeHeapSize,         14*M );
+define_pd_global(intx, NonMethodCodeHeapSize,        5*M  );
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx, CodeCacheMinBlockLength,     1);
 define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
--- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -74,6 +74,9 @@
 // InitialCodeCacheSize derived from specjbb2000 run.
 define_pd_global(intx, InitialCodeCacheSize,         2048*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, ReservedCodeCacheSize,        48*M);
+define_pd_global(intx, NonProfiledCodeHeapSize,      21*M);
+define_pd_global(intx, ProfiledCodeHeapSize,         22*M);
+define_pd_global(intx, NonMethodCodeHeapSize,        5*M );
 define_pd_global(intx, CodeCacheExpansionSize,       64*K);
 
 // Ergonomics related flags
@@ -82,6 +85,9 @@
 // InitialCodeCacheSize derived from specjbb2000 run.
 define_pd_global(intx, InitialCodeCacheSize,         1536*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, ReservedCodeCacheSize,        32*M);
+define_pd_global(intx, NonProfiledCodeHeapSize,      13*M);
+define_pd_global(intx, ProfiledCodeHeapSize,         14*M);
+define_pd_global(intx, NonMethodCodeHeapSize,        5*M );
 define_pd_global(intx, CodeCacheExpansionSize,       32*K);
 // Ergonomics related flags
 define_pd_global(uint64_t,MaxRAM,                    4ULL*G);
--- a/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -47,6 +47,9 @@
 define_pd_global(intx, NewSizeThreadIncrease,        4*K  );
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
+define_pd_global(intx, NonProfiledCodeHeapSize,      13*M );
+define_pd_global(intx, ProfiledCodeHeapSize,         14*M );
+define_pd_global(intx, NonMethodCodeHeapSize,        5*M  );
 define_pd_global(bool, ProfileInterpreter,           false);
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx, CodeCacheMinBlockLength,     1);
--- a/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -84,6 +84,9 @@
 define_pd_global(bool, OptoBundling,                 false);
 
 define_pd_global(intx, ReservedCodeCacheSize,        48*M);
+define_pd_global(intx, NonProfiledCodeHeapSize,      21*M);
+define_pd_global(intx, ProfiledCodeHeapSize,         22*M);
+define_pd_global(intx, NonMethodCodeHeapSize,        5*M );
 define_pd_global(uintx, CodeCacheMinBlockLength,     4);
 define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
 
--- a/hotspot/src/cpu/zero/vm/shark_globals_zero.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/cpu/zero/vm/shark_globals_zero.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -53,6 +53,9 @@
 define_pd_global(intx,     NewSizeThreadIncrease,        4*K  );
 define_pd_global(intx,     InitialCodeCacheSize,         160*K);
 define_pd_global(intx,     ReservedCodeCacheSize,        32*M );
+define_pd_global(intx,     NonProfiledCodeHeapSize,      13*M );
+define_pd_global(intx,     ProfiledCodeHeapSize,         14*M );
+define_pd_global(intx,     NonMethodCodeHeapSize,        5*M  );
 define_pd_global(bool,     ProfileInterpreter,           false);
 define_pd_global(intx,     CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx,    CodeCacheMinBlockLength,      1    );
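
Across the platform files above, the three new CodeHeap defaults are chosen to partition the platform's existing ReservedCodeCacheSize default exactly. A quick worked check (illustrative C++, not changeset code; values in MB):

    static_assert(125 + 126 + 5 == 256, "ppc C2 defaults partition the code cache");
    static_assert( 21 +  22 + 5 ==  48, "64-bit sparc/x86 C2 defaults");
    static_assert( 13 +  14 + 5 ==  32, "sparc/x86 C1, 32-bit sparc C2, zero/shark defaults");
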
--- a/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -67,7 +67,7 @@
  * we link this program with -z nodefs .
  *
  * But for 'debug1' and 'fastdebug1' we still have to provide
- * a particular workaround for the following symbols bellow.
+ * a particular workaround for the following symbols below.
  * It will be good to find out a generic way in the future.
  */
 
@@ -87,21 +87,24 @@
 #endif /* ASSERT */
 #endif /* COMPILER1 */
 
-#define GEN_OFFS(Type,Name)                             \
+#define GEN_OFFS_NAME(Type,Name,OutputType)             \
   switch(gen_variant) {                                 \
   case GEN_OFFSET:                                      \
-    printf("#define OFFSET_%-33s %ld\n",                 \
-           #Type #Name, offset_of(Type, Name)); \
+    printf("#define OFFSET_%-33s %ld\n",                \
+            #OutputType #Name, offset_of(Type, Name));  \
     break;                                              \
   case GEN_INDEX:                                       \
     printf("#define IDX_OFFSET_%-33s %d\n",             \
-            #Type #Name, index++);                      \
+            #OutputType #Name, index++);                \
     break;                                              \
   case GEN_TABLE:                                       \
-    printf("\tOFFSET_%s,\n", #Type #Name);              \
+    printf("\tOFFSET_%s,\n", #OutputType #Name);        \
     break;                                              \
   }
 
+#define GEN_OFFS(Type,Name)                             \
+  GEN_OFFS_NAME(Type,Name,Type)
+
 #define GEN_SIZE(Type)                                  \
   switch(gen_variant) {                                 \
   case GEN_OFFSET:                                      \
@@ -246,6 +249,11 @@
   GEN_OFFS(VirtualSpace, _high);
   printf("\n");
 
+  /* We need to use different names here because of the template parameter */
+  GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _data, GrowableArray_CodeHeap);
+  GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _len, GrowableArray_CodeHeap);
+  printf("\n");
+
   GEN_OFFS(CodeBlob, _name);
   GEN_OFFS(CodeBlob, _header_size);
   GEN_OFFS(CodeBlob, _content_offset);
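
GEN_OFFS_NAME is needed because GEN_OFFS stringizes the type into the generated macro name; for GrowableArray<CodeHeap*> that would put '<', '>' and '*' into a #define identifier. The OutputType parameter substitutes a plain name instead. The generated header presumably gains entries like these (offset and index values are made up here; they depend on the build):

    /* Hypothetical output for the new entries, one line per gen_variant: */
    #define OFFSET_GrowableArray_CodeHeap_data        16   /* GEN_OFFSET */
    #define IDX_OFFSET_GrowableArray_CodeHeap_data    40   /* GEN_INDEX  */
        OFFSET_GrowableArray_CodeHeap_data,                /* GEN_TABLE  */
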
--- a/hotspot/src/os/bsd/dtrace/jhelper.d	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/os/bsd/dtrace/jhelper.d	Wed Sep 17 08:00:07 2014 +0200
@@ -43,7 +43,9 @@
 
 extern pointer __JvmOffsets;
 
-extern pointer __1cJCodeCacheF_heap_;
+/* GrowableArray<CodeHeap*>* */
+extern pointer __1cJCodeCacheG_heaps_;
+
 extern pointer __1cIUniverseO_collectedHeap_;
 
 extern pointer __1cHnmethodG__vtbl_;
@@ -95,8 +97,8 @@
 /!init_done && !this->done/
 {
   MARK_LINE;
-  init_done = 1;
 
+  copyin_offset(POINTER_SIZE);
   copyin_offset(COMPILER);
   copyin_offset(OFFSET_CollectedHeap_reserved);
   copyin_offset(OFFSET_MemRegion_start);
@@ -122,6 +124,9 @@
   copyin_offset(OFFSET_CodeHeap_segmap);
   copyin_offset(OFFSET_CodeHeap_log2_segment_size);
 
+  copyin_offset(OFFSET_GrowableArray_CodeHeap_data);
+  copyin_offset(OFFSET_GrowableArray_CodeHeap_len);
+
   copyin_offset(OFFSET_VirtualSpace_low);
   copyin_offset(OFFSET_VirtualSpace_high);
 
@@ -152,26 +157,14 @@
 #error "Don't know architecture"
 #endif
 
-  this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
-
-  /* Reading volatile values */
-  this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address + 
-      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
-
-  this->CodeCache_high = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+  /* Read address of GrowableArray<CodeHeap*> */
+  this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_);
+  /* Read address of _data array field in GrowableArray */
+  this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data);
+  this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len);
 
-  this->CodeCache_segmap_low = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
-
-  this->CodeCache_segmap_high = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_high);
-
-  this->CodeHeap_log2_segment_size = copyin_uint32(
-      this->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size);
-
-  this->Method_vtbl             = (pointer) &``__1cNMethodG__vtbl_;
-
+  this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
+  
   /*
    * Get Java heap bounds
    */
@@ -187,21 +180,152 @@
   this->heap_end = this->heap_start + this->heap_size;
 }
 
+/*
+ * IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in 
+ * the code cache. If more code heaps are added, the following probes have to
+ * be extended. This is done by simply adding a probe to get the heap bounds
+ * and another probe to set the code heap address of the newly created heap.
+ */
+
+/*
+ * ----- BEGIN: Get bounds of code heaps -----
+ */
 dtrace:helper:ustack:
-/!this->done &&
-this->CodeCache_low <= this->pc && this->pc < this->CodeCache_high/
+/init_done < 1 && this->number_of_heaps >= 1 && !this->done/
+{
+  MARK_LINE;
+  /* CodeHeap 1 */
+  init_done = 1;
+  this->code_heap1_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap1_low = copyin_ptr(this->code_heap1_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap1_high = copyin_ptr(this->code_heap1_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 2 && this->number_of_heaps >= 2 && !this->done/
+{
+  MARK_LINE;
+  /* CodeHeap 2 */
+  init_done = 2;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap2_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap2_low = copyin_ptr(this->code_heap2_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap2_high = copyin_ptr(this->code_heap2_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 3 && this->number_of_heaps >= 3 && !this->done/
+{
+  /* CodeHeap 3 */
+  init_done = 3;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap3_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap3_low = copyin_ptr(this->code_heap3_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap3_high = copyin_ptr(this->code_heap3_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 4 && this->number_of_heaps >= 4 && !this->done/
+{
+  /* CodeHeap 4 */
+  init_done = 4;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap4_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap4_low = copyin_ptr(this->code_heap4_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap4_high = copyin_ptr(this->code_heap4_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 5 && this->number_of_heaps >= 5 && !this->done/
+{
+  /* CodeHeap 5 */
+  init_done = 5;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap5_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap5_low = copyin_ptr(this->code_heap5_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap5_high = copyin_ptr(this->code_heap5_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+/*
+ * ----- END: Get bounds of code heaps -----
+ */
+
+/*
+ * ----- BEGIN: Get address of the code heap pc points to -----
+ */
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 1 && this->code_heap1_low <= this->pc && this->pc < this->code_heap1_high/
 {
   MARK_LINE;
   this->codecache = 1;
+  this->code_heap_address = this->code_heap1_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 2 && this->code_heap2_low <= this->pc && this->pc < this->code_heap2_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap2_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 3 && this->code_heap3_low <= this->pc && this->pc < this->code_heap3_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap3_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 4 && this->code_heap4_low <= this->pc && this->pc < this->code_heap4_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap4_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 5 && this->code_heap5_low <= this->pc && this->pc < this->code_heap5_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap5_address;
+}
+/*
+ * ----- END: Get address of the code heap pc points to -----
+ */
+
+dtrace:helper:ustack:
+/!this->done && this->codecache/
+{
+  MARK_LINE;
+  /* 
+   * Get code heap configuration
+   */
+  this->code_heap_low = copyin_ptr(this->code_heap_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap_segmap_low = copyin_ptr(this->code_heap_address +
+      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
+  this->code_heap_log2_segment_size = copyin_uint32(
+      this->code_heap_address + OFFSET_CodeHeap_log2_segment_size);
 
   /*
-   * Find start.
+   * Find start
    */
-  this->segment = (this->pc - this->CodeCache_low) >>
-    this->CodeHeap_log2_segment_size;
-  this->block = this->CodeCache_segmap_low;
+  this->segment = (this->pc - this->code_heap_low) >>
+    this->code_heap_log2_segment_size;
+  this->block = this->code_heap_segmap_low;
   this->tag = copyin_uchar(this->block + this->segment);
-  "second";
 }
 
 dtrace:helper:ustack:
@@ -256,8 +380,8 @@
 /!this->done && this->codecache/
 {
   MARK_LINE;
-  this->block = this->CodeCache_low +
-    (this->segment << this->CodeHeap_log2_segment_size);
+  this->block = this->code_heap_low +
+    (this->segment << this->code_heap_log2_segment_size);
   this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used);
 }
 
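DTrace helper probes cannot loop or call functions, which is why the heap bounds are read by up to five near-identical probes above. As a plain loop, the cascade computes the following (a C++ model only; copyin_ptr, POINTER_SIZE and the OFFSET_* names stand in for the D builtin and the generated JvmOffsets constants):

    #include <cstdint>

    // Stand-ins for the DTrace builtin and generated offset constants.
    extern uintptr_t copyin_ptr(uintptr_t addr);
    extern const uintptr_t POINTER_SIZE, OFFSET_CodeHeap_memory,
                           OFFSET_VirtualSpace_low, OFFSET_VirtualSpace_high;

    struct HeapBounds { uintptr_t address, low, high; };
    const int MAX_TRACED_HEAPS = 5;   // current limit of the ustack helper

    // Loop model of the unrolled "get bounds of code heaps" probes above.
    void read_heap_bounds(uintptr_t code_heaps_array_address, int number_of_heaps,
                          HeapBounds bounds[MAX_TRACED_HEAPS]) {
      for (int i = 0; i < number_of_heaps && i < MAX_TRACED_HEAPS; i++) {
        // One probe per iteration in jhelper.d: read the CodeHeap* from the
        // GrowableArray data array, then the [low, high) range of its memory.
        bounds[i].address = copyin_ptr(code_heaps_array_address + i * POINTER_SIZE);
        bounds[i].low     = copyin_ptr(bounds[i].address + OFFSET_CodeHeap_memory +
                                       OFFSET_VirtualSpace_low);
        bounds[i].high    = copyin_ptr(bounds[i].address + OFFSET_CodeHeap_memory +
                                       OFFSET_VirtualSpace_high);
      }
    }
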
--- a/hotspot/src/os/bsd/dtrace/libjvm_db.c	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/os/bsd/dtrace/libjvm_db.c	Wed Sep 17 08:00:07 2014 +0200
@@ -150,16 +150,18 @@
   uint64_t Use_Compressed_Oops_address;
   uint64_t Universe_narrow_oop_base_address;
   uint64_t Universe_narrow_oop_shift_address;
-  uint64_t CodeCache_heap_address;
+  uint64_t CodeCache_heaps_address;
 
   /* Volatiles */
   uint8_t  Use_Compressed_Oops;
   uint64_t Universe_narrow_oop_base;
   uint32_t Universe_narrow_oop_shift;
-  uint64_t CodeCache_low;
-  uint64_t CodeCache_high;
-  uint64_t CodeCache_segmap_low;
-  uint64_t CodeCache_segmap_high;
+  // Code cache heaps
+  int32_t  Number_of_heaps;
+  uint64_t* Heap_low;
+  uint64_t* Heap_high;
+  uint64_t* Heap_segmap_low;
+  uint64_t* Heap_segmap_high;
 
   int32_t  SIZE_CodeCache_log2_segment;
 
@@ -278,8 +280,9 @@
     }
 
     if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) {
-      if (strcmp("_heap", vmp->fieldName) == 0) {
-        err = read_pointer(J, vmp->address, &J->CodeCache_heap_address);
+      /* Read _heaps field of type GrowableArray<CodeHeap*>*      */
+      if (strcmp("_heaps", vmp->fieldName) == 0) {
+        err = read_pointer(J, vmp->address, &J->CodeCache_heaps_address);
       }
     } else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) {
       if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
@@ -318,7 +321,9 @@
 }
 
 static int read_volatiles(jvm_agent_t* J) {
-  uint64_t ptr;
+  int i;
+  uint64_t array_data;
+  uint64_t code_heap_address;
   int err;
 
   err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
@@ -334,20 +339,43 @@
   err = ps_pread(J->P,  J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
   CHECK_FAIL(err);
 
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
-                     OFFSET_VirtualSpace_low, &J->CodeCache_low);
-  CHECK_FAIL(err);
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
-                     OFFSET_VirtualSpace_high, &J->CodeCache_high);
-  CHECK_FAIL(err);
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
-                     OFFSET_VirtualSpace_low, &J->CodeCache_segmap_low);
-  CHECK_FAIL(err);
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
-                     OFFSET_VirtualSpace_high, &J->CodeCache_segmap_high);
-  CHECK_FAIL(err);
+  /* CodeCache_heaps_address points to GrowableArray<CodeHeap*>, read _data field
+     pointing to the first entry of type CodeHeap* in the array */
+  err = read_pointer(J, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_data, &array_data);
+  CHECK_FAIL(err);
+  /* Read _len field containing the number of code heaps */
+  err = ps_pread(J->P, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_len,
+                 &J->Number_of_heaps, sizeof(J->Number_of_heaps));
+  CHECK_FAIL(err);
+
+  /* Allocate memory for heap configurations */
+  J->Heap_low = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+  J->Heap_high = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+  J->Heap_segmap_low = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+  J->Heap_segmap_high = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+
+  /* Read code heap configurations */
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    /* Read address of heap */
+    err = read_pointer(J, array_data, &code_heap_address);
+    CHECK_FAIL(err);
 
-  err = ps_pread(J->P, J->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size,
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
+                       OFFSET_VirtualSpace_low, &J->Heap_low[i]);
+    CHECK_FAIL(err);
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
+                       OFFSET_VirtualSpace_high, &J->Heap_high[i]);
+    CHECK_FAIL(err);
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
+                       OFFSET_VirtualSpace_low, &J->Heap_segmap_low[i]);
+    CHECK_FAIL(err);
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
+                       OFFSET_VirtualSpace_high, &J->Heap_segmap_high[i]);
+    CHECK_FAIL(err);
+
+    /* Increment pointer to next entry */
+    array_data = array_data + POINTER_SIZE;
+  }
+
+  err = ps_pread(J->P, code_heap_address + OFFSET_CodeHeap_log2_segment_size,
                  &J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment));
   CHECK_FAIL(err);
 
@@ -357,46 +385,57 @@
   return err;
 }
 
-
-static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
-  /* make sure the code cache is up to date */
-  return (J->CodeCache_low <= ptr && ptr < J->CodeCache_high);
+static int codeheap_contains(int heap_num, jvm_agent_t* J, uint64_t ptr) {
+  return (J->Heap_low[heap_num] <= ptr && ptr < J->Heap_high[heap_num]);
 }
 
-static uint64_t segment_for(jvm_agent_t* J, uint64_t p) {
-  return (p - J->CodeCache_low) >> J->SIZE_CodeCache_log2_segment;
+static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
+  int i;
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    if (codeheap_contains(i, J, ptr)) {
+      return 1;
+    }
+  }
+  return 0;
 }
 
-static uint64_t block_at(jvm_agent_t* J, int i) {
-  return J->CodeCache_low + (i << J->SIZE_CodeCache_log2_segment);
+static uint64_t segment_for(int heap_num, jvm_agent_t* J, uint64_t p) {
+  return (p - J->Heap_low[heap_num]) >> J->SIZE_CodeCache_log2_segment;
+}
+
+static uint64_t block_at(int heap_num, jvm_agent_t* J, int i) {
+  return J->Heap_low[heap_num] + (i << J->SIZE_CodeCache_log2_segment);
 }
 
 static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) {
   int err;
+  int i;
 
-  *startp = 0;
-  if (J->CodeCache_low <= ptr && ptr < J->CodeCache_high) {
-    int32_t used;
-    uint64_t segment = segment_for(J, ptr);
-    uint64_t block = J->CodeCache_segmap_low;
-    uint8_t tag;
-    err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
-    CHECK_FAIL(err);
-    if (tag == 0xff)
-      return PS_OK;
-    while (tag > 0) {
+  *startp = 0;
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    if (codeheap_contains(i, J, ptr)) {
+      int32_t used;
+      uint64_t segment = segment_for(i, J, ptr);
+      uint64_t block = J->Heap_segmap_low[i];
+      uint8_t tag;
       err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
       CHECK_FAIL(err);
-      segment -= tag;
+      if (tag == 0xff)
+        return PS_OK;
+      while (tag > 0) {
+        err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
+        CHECK_FAIL(err);
+        segment -= tag;
+      }
+      block = block_at(i, J, segment);
+      err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
+      CHECK_FAIL(err);
+      if (used) {
+        *startp = block + SIZE_HeapBlockHeader;
+      }
     }
-    block = block_at(J, segment);
-    err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
-    CHECK_FAIL(err);
-    if (used) {
-      *startp = block + SIZE_HeapBlockHeader;
-    }
   }
   return PS_OK;
 
  fail:
   return -1;
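
find_start() above locates a block header by walking the CodeHeap segment map backwards. The encoding it relies on, as I understand the CodeHeap implementation in heap.cpp: 0xff marks a free segment, 0 marks the first segment of a block, and a positive tag is the distance to step back toward the start. A worked example with made-up values:

    /* Suppose a code blob covers segments 10..14, so the segment map holds
     *   segmap[10..14] = {0, 1, 2, 3, 4}
     * For a pc falling into segment 13 the walk reads:
     *   tag = segmap[13] = 3  ->  segment = 13 - 3 = 10
     *   tag = segmap[10] = 0  ->  loop exits; segment 10 is the block start
     * block_at(i, J, 10) yields the HeapBlock, and the code itself begins
     * SIZE_HeapBlockHeader bytes after it. For very large blocks the tags
     * stay below 0xff and the walk simply takes several hops. */
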
--- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -82,21 +82,24 @@
 #endif /* ASSERT */
 #endif /* COMPILER1 */
 
-#define GEN_OFFS(Type,Name)                             \
+#define GEN_OFFS_NAME(Type,Name,OutputType)             \
   switch(gen_variant) {                                 \
   case GEN_OFFSET:                                      \
     printf("#define OFFSET_%-33s %d\n",                 \
-            #Type #Name, offset_of(Type, Name));        \
+            #OutputType #Name, offset_of(Type, Name));  \
     break;                                              \
   case GEN_INDEX:                                       \
     printf("#define IDX_OFFSET_%-33s %d\n",             \
-            #Type #Name, index++);                      \
+            #OutputType #Name, index++);                \
     break;                                              \
   case GEN_TABLE:                                       \
-    printf("\tOFFSET_%s,\n", #Type #Name);              \
+    printf("\tOFFSET_%s,\n", #OutputType #Name);        \
     break;                                              \
   }
 
+#define GEN_OFFS(Type,Name)                             \
+  GEN_OFFS_NAME(Type,Name,Type)
+
 #define GEN_SIZE(Type)                                  \
   switch(gen_variant) {                                 \
   case GEN_OFFSET:                                      \
@@ -241,6 +244,11 @@
   GEN_OFFS(VirtualSpace, _high);
   printf("\n");
 
+  /* We need to use different names here because of the template parameter */
+  GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _data, GrowableArray_CodeHeap);
+  GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _len, GrowableArray_CodeHeap);
+  printf("\n");
+
   GEN_OFFS(CodeBlob, _name);
   GEN_OFFS(CodeBlob, _header_size);
   GEN_OFFS(CodeBlob, _content_offset);
--- a/hotspot/src/os/solaris/dtrace/jhelper.d	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/os/solaris/dtrace/jhelper.d	Wed Sep 17 08:00:07 2014 +0200
@@ -43,7 +43,9 @@
 
 extern pointer __JvmOffsets;
 
-extern pointer __1cJCodeCacheF_heap_;
+/* GrowableArray<CodeHeap*>* */
+extern pointer __1cJCodeCacheG_heaps_;
+
 extern pointer __1cIUniverseO_collectedHeap_;
 
 extern pointer __1cHnmethodG__vtbl_;
@@ -95,8 +97,8 @@
 /!init_done && !this->done/
 {
   MARK_LINE;
-  init_done = 1;
-
+  
+  copyin_offset(POINTER_SIZE);
   copyin_offset(COMPILER);
   copyin_offset(OFFSET_CollectedHeap_reserved);
   copyin_offset(OFFSET_MemRegion_start);
@@ -122,6 +124,9 @@
   copyin_offset(OFFSET_CodeHeap_segmap);
   copyin_offset(OFFSET_CodeHeap_log2_segment_size);
 
+  copyin_offset(OFFSET_GrowableArray_CodeHeap_data);
+  copyin_offset(OFFSET_GrowableArray_CodeHeap_len);
+
   copyin_offset(OFFSET_VirtualSpace_low);
   copyin_offset(OFFSET_VirtualSpace_high);
 
@@ -152,24 +157,13 @@
 #error "Don't know architecture"
 #endif
 
-  this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
-
-  this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address + 
-      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
-
-  this->CodeCache_high = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+  /* Read address of GrowableArray<CodeHeap*> */
+  this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_);
+  /* Read address of _data array field in GrowableArray */
+  this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data);
+  this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len);
 
-  this->CodeCache_segmap_low = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
-
-  this->CodeCache_segmap_high = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_high);
-
-  this->CodeHeap_log2_segment_size = copyin_uint32(
-      this->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size);
-
-  this->Method_vtbl             = (pointer) &``__1cGMethodG__vtbl_;
+  this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
 
   /*
    * Get Java heap bounds
@@ -186,21 +180,152 @@
   this->heap_end = this->heap_start + this->heap_size;
 }
 
+/*
+ * IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in 
+ * the code cache. If more code heaps are added, the following probes have to
+ * be extended. This is done by simply adding a probe to get the heap bounds
+ * and another probe to set the code heap address of the newly created heap.
+ */
+
+/*
+ * ----- BEGIN: Get bounds of code heaps -----
+ */
 dtrace:helper:ustack:
-/!this->done &&
-this->CodeCache_low <= this->pc && this->pc < this->CodeCache_high/
+/init_done < 1 && this->number_of_heaps >= 1 && !this->done/
+{
+  MARK_LINE;
+  /* CodeHeap 1 */
+  init_done = 1;
+  this->code_heap1_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap1_low = copyin_ptr(this->code_heap1_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap1_high = copyin_ptr(this->code_heap1_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 2 && this->number_of_heaps >= 2 && !this->done/
+{
+  MARK_LINE;
+  /* CodeHeap 2 */
+  init_done = 2;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap2_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap2_low = copyin_ptr(this->code_heap2_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap2_high = copyin_ptr(this->code_heap2_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 3 && this->number_of_heaps >= 3 && !this->done/
+{
+  /* CodeHeap 3 */
+  init_done = 3;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap3_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap3_low = copyin_ptr(this->code_heap3_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap3_high = copyin_ptr(this->code_heap3_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 4 && this->number_of_heaps >= 4 && !this->done/
+{
+  /* CodeHeap 4 */
+  init_done = 4;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap4_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap4_low = copyin_ptr(this->code_heap4_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap4_high = copyin_ptr(this->code_heap4_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 5 && this->number_of_heaps >= 5 && !this->done/
+{
+  /* CodeHeap 5 */
+  init_done = 5;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap5_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap5_low = copyin_ptr(this->code_heap5_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap5_high = copyin_ptr(this->code_heap5_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+/*
+ * ----- END: Get bounds of code heaps -----
+ */
+
+/*
+ * ----- BEGIN: Get address of the code heap pc points to -----
+ */
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 1 && this->code_heap1_low <= this->pc && this->pc < this->code_heap1_high/
 {
   MARK_LINE;
   this->codecache = 1;
+  this->code_heap_address = this->code_heap1_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 2 && this->code_heap2_low <= this->pc && this->pc < this->code_heap2_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap2_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 3 && this->code_heap3_low <= this->pc && this->pc < this->code_heap3_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap3_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 4 && this->code_heap4_low <= this->pc && this->pc < this->code_heap4_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap4_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 5 && this->code_heap5_low <= this->pc && this->pc < this->code_heap5_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap5_address;
+}
+/*
+ * ----- END: Get address of the code heap pc points to -----
+ */
+
+dtrace:helper:ustack:
+/!this->done && this->codecache/
+{
+  MARK_LINE;
+  /* 
+   * Get code heap configuration
+   */
+  this->code_heap_low = copyin_ptr(this->code_heap_address + 
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap_segmap_low = copyin_ptr(this->code_heap_address +
+      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
+  this->code_heap_log2_segment_size = copyin_uint32(
+      this->code_heap_address + OFFSET_CodeHeap_log2_segment_size);
 
   /*
-   * Find start.
+   * Find start
    */
-  this->segment = (this->pc - this->CodeCache_low) >>
-    this->CodeHeap_log2_segment_size;
-  this->block = this->CodeCache_segmap_low;
+  this->segment = (this->pc - this->code_heap_low) >>
+    this->code_heap_log2_segment_size;
+  this->block = this->code_heap_segmap_low;
   this->tag = copyin_uchar(this->block + this->segment);
-  "second";
 }
 
 dtrace:helper:ustack:
@@ -255,8 +380,8 @@
 /!this->done && this->codecache/
 {
   MARK_LINE;
-  this->block = this->CodeCache_low +
-    (this->segment << this->CodeHeap_log2_segment_size);
+  this->block = this->code_heap_low +
+    (this->segment << this->code_heap_log2_segment_size);
   this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used);
 }
 
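The second cascade above ("Get address of the code heap pc points to") unrolls a containment search in the same style. As a loop, reusing the HeapBounds sketch given after the BSD copy of this file:

    // Loop model of the pc-lookup cascade: select the heap whose [low, high)
    // range contains pc, as the five probes above do one case at a time.
    uintptr_t find_code_heap(const HeapBounds* bounds, int number_of_heaps,
                             uintptr_t pc) {
      for (int i = 0; i < number_of_heaps; i++) {
        if (bounds[i].low <= pc && pc < bounds[i].high) {
          return bounds[i].address;   // jhelper.d sets this->code_heap_address
        }
      }
      return 0;                       // pc is outside the code cache
    }
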
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c	Wed Sep 17 08:00:07 2014 +0200
@@ -150,16 +150,18 @@
   uint64_t Use_Compressed_Oops_address;
   uint64_t Universe_narrow_oop_base_address;
   uint64_t Universe_narrow_oop_shift_address;
-  uint64_t CodeCache_heap_address;
+  uint64_t CodeCache_heaps_address;
 
   /* Volatiles */
   uint8_t  Use_Compressed_Oops;
   uint64_t Universe_narrow_oop_base;
   uint32_t Universe_narrow_oop_shift;
-  uint64_t CodeCache_low;
-  uint64_t CodeCache_high;
-  uint64_t CodeCache_segmap_low;
-  uint64_t CodeCache_segmap_high;
+  // Code cache heaps
+  int32_t  Number_of_heaps;
+  uint64_t* Heap_low;
+  uint64_t* Heap_high;
+  uint64_t* Heap_segmap_low;
+  uint64_t* Heap_segmap_high;
 
   int32_t  SIZE_CodeCache_log2_segment;
 
@@ -278,8 +280,9 @@
     }
 
     if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) {
-      if (strcmp("_heap", vmp->fieldName) == 0) {
-        err = read_pointer(J, vmp->address, &J->CodeCache_heap_address);
+      /* Read _heaps field of type GrowableArray<CodeHeap*>*      */
+      if (strcmp("_heaps", vmp->fieldName) == 0) {
+        err = read_pointer(J, vmp->address, &J->CodeCache_heaps_address);
       }
     } else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) {
       if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
@@ -318,7 +321,9 @@
 }
 
 static int read_volatiles(jvm_agent_t* J) {
-  uint64_t ptr;
+  int i;
+  uint64_t array_data;
+  uint64_t code_heap_address;
   int err;
 
   err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
@@ -334,20 +339,43 @@
   err = ps_pread(J->P,  J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
   CHECK_FAIL(err);
 
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
-                     OFFSET_VirtualSpace_low, &J->CodeCache_low);
-  CHECK_FAIL(err);
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
-                     OFFSET_VirtualSpace_high, &J->CodeCache_high);
-  CHECK_FAIL(err);
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
-                     OFFSET_VirtualSpace_low, &J->CodeCache_segmap_low);
-  CHECK_FAIL(err);
-  err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
-                     OFFSET_VirtualSpace_high, &J->CodeCache_segmap_high);
-  CHECK_FAIL(err);
+  /* CodeCache_heaps_address points to GrowableArray<CodeHeap*>, read _data field
+     pointing to the first entry of type CodeHeap* in the array */
+  err = read_pointer(J, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_data, &array_data);
+  CHECK_FAIL(err);
+  /* Read _len field containing the number of code heaps */
+  err = ps_pread(J->P, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_len,
+                 &J->Number_of_heaps, sizeof(J->Number_of_heaps));
+  CHECK_FAIL(err);
+
+  /* Allocate memory for heap configurations */
+  J->Heap_low = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+  J->Heap_high = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+  J->Heap_segmap_low = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+  J->Heap_segmap_high = (uint64_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
+
+  /* Read code heap configurations */
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    /* Read address of heap */
+    err = read_pointer(J, array_data, &code_heap_address);
+    CHECK_FAIL(err);
 
-  err = ps_pread(J->P, J->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size,
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
+                       OFFSET_VirtualSpace_low, &J->Heap_low[i]);
+    CHECK_FAIL(err);
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
+                       OFFSET_VirtualSpace_high, &J->Heap_high[i]);
+    CHECK_FAIL(err);
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
+                       OFFSET_VirtualSpace_low, &J->Heap_segmap_low[i]);
+    CHECK_FAIL(err);
+    err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
+                       OFFSET_VirtualSpace_high, &J->Heap_segmap_high[i]);
+    CHECK_FAIL(err);
+
+    /* Increment pointer to next entry */
+    array_data = array_data + POINTER_SIZE;
+  }
+
+  err = ps_pread(J->P, code_heap_address + OFFSET_CodeHeap_log2_segment_size,
                  &J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment));
   CHECK_FAIL(err);
 
@@ -357,46 +385,57 @@
   return err;
 }
 
-
-static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
-  /* make sure the code cache is up to date */
-  return (J->CodeCache_low <= ptr && ptr < J->CodeCache_high);
+static int codeheap_contains(int heap_num, jvm_agent_t* J, uint64_t ptr) {
+  return (J->Heap_low[heap_num] <= ptr && ptr < J->Heap_high[heap_num]);
 }
 
-static uint64_t segment_for(jvm_agent_t* J, uint64_t p) {
-  return (p - J->CodeCache_low) >> J->SIZE_CodeCache_log2_segment;
+static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
+  int i;
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    if (codeheap_contains(i, J, ptr)) {
+      return 1;
+    }
+  }
+  return 0;
 }
 
-static uint64_t block_at(jvm_agent_t* J, int i) {
-  return J->CodeCache_low + (i << J->SIZE_CodeCache_log2_segment);
+static uint64_t segment_for(int heap_num, jvm_agent_t* J, uint64_t p) {
+  return (p - J->Heap_low[heap_num]) >> J->SIZE_CodeCache_log2_segment;
+}
+
+static uint64_t block_at(int heap_num, jvm_agent_t* J, int i) {
+  return J->Heap_low[heap_num] + (i << J->SIZE_CodeCache_log2_segment);
 }
 
 static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) {
   int err;
+  int i;
 
-  *startp = 0;
-  if (J->CodeCache_low <= ptr && ptr < J->CodeCache_high) {
-    int32_t used;
-    uint64_t segment = segment_for(J, ptr);
-    uint64_t block = J->CodeCache_segmap_low;
-    uint8_t tag;
-    err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
-    CHECK_FAIL(err);
-    if (tag == 0xff)
-      return PS_OK;
-    while (tag > 0) {
+  *startp = 0;
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    if (codeheap_contains(i, J, ptr)) {
+      int32_t used;
+      uint64_t segment = segment_for(i, J, ptr);
+      uint64_t block = J->Heap_segmap_low[i];
+      uint8_t tag;
       err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
       CHECK_FAIL(err);
-      segment -= tag;
+      if (tag == 0xff)
+        return PS_OK;
+      while (tag > 0) {
+        err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
+        CHECK_FAIL(err);
+        segment -= tag;
+      }
+      block = block_at(i, J, segment);
+      err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
+      CHECK_FAIL(err);
+      if (used) {
+        *startp = block + SIZE_HeapBlockHeader;
+      }
     }
-    block = block_at(J, segment);
-    err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
-    CHECK_FAIL(err);
-    if (used) {
-      *startp = block + SIZE_HeapBlockHeader;
-    }
   }
   return PS_OK;
 
  fail:
   return -1;
--- a/hotspot/src/share/vm/c1/c1_Compiler.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compiler.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -76,6 +76,11 @@
   }
 }
 
+int Compiler::code_buffer_size() {
+  assert(SegmentedCodeCache, "Should be only used with a segmented code cache");
+  return Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size();
+}
+
 BufferBlob* Compiler::init_buffer_blob() {
   // Allocate buffer blob once at startup since allocation for each
   // compilation seems to be too expensive (at least on Intel win32).
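
This new accessor lets the code cache reserve room for C1's per-thread buffer blobs up front. CodeCache::initialize_heaps() (see the codeCache.cpp hunk below) adds one such buffer per C1 compiler thread to the default non-method heap size, roughly:

    // Condensed from the initialize_heaps() hunk below, not new logic:
    size_t code_buffers_size = c1_count * Compiler::code_buffer_size();
    FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize,
                  NonMethodCodeHeapSize + code_buffers_size);
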
--- a/hotspot/src/share/vm/c1/c1_Compiler.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compiler.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -54,6 +54,9 @@
 
   // Print compilation timers and statistics
   virtual void print_timers();
+
+  // Size of the code buffer
+  static int code_buffer_size();
 };
 
 #endif // SHARE_VM_C1_C1_COMPILER_HPP
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -34,6 +34,7 @@
 #include "ci/ciUtilities.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "code/codeCache.hpp"
 #include "code/scopeDesc.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
@@ -1085,7 +1086,7 @@
   } else {
     // The CodeCache is full. Print out warning and disable compilation.
     record_failure("code cache is full");
-    CompileBroker::handle_full_code_cache();
+    CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
   }
 }
 
--- a/hotspot/src/share/vm/code/codeBlob.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -229,14 +229,11 @@
   return blob;
 }
 
-
 void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
-  void* p = CodeCache::allocate(size, is_critical);
-  return p;
+  return CodeCache::allocate(size, CodeBlobType::NonMethod, is_critical);
 }
 
-
-void BufferBlob::free( BufferBlob *blob ) {
+void BufferBlob::free(BufferBlob *blob) {
   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
   blob->flush();
   {
@@ -299,7 +296,6 @@
   return blob;
 }
 
-
 //----------------------------------------------------------------------------------------------------
 // Implementation of RuntimeStub
 
@@ -340,14 +336,14 @@
 
 
 void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
-  void* p = CodeCache::allocate(size, true);
+  void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }
 
 // operator new shared by all singletons:
 void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
-  void* p = CodeCache::allocate(size, true);
+  void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }
--- a/hotspot/src/share/vm/code/codeBlob.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -30,6 +30,18 @@
 #include "runtime/frame.hpp"
 #include "runtime/handles.hpp"
 
+// CodeBlob Types
+// Used in the CodeCache to assign CodeBlobs to different CodeHeaps
+struct CodeBlobType {
+  enum {
+    MethodNonProfiled   = 0,    // Execution level 1 and 4 (non-profiled) nmethods (including native nmethods)
+    MethodProfiled      = 1,    // Execution level 2 and 3 (profiled) nmethods
+    NonMethod           = 2,    // Non-methods like Buffers, Adapters and Runtime Stubs
+    All                 = 3,    // All types (No code cache segmentation)
+    NumTypes            = 4     // Number of CodeBlobTypes
+  };
+};
+
 // CodeBlob - superclass for all entries in the CodeCache.
 //
 // Subtypes are:
@@ -385,9 +397,6 @@
     return (pc == unpack_pc || (pc + frame::pc_return_offset) == unpack_pc);
   }
 
-
-
-
   // GC for args
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
 
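The ciEnv.cpp hunk above reports full-code-cache events per heap via CodeCache::get_code_blob_type(comp_level). That accessor is not among the hunks shown here, but given the enum comments above, tiers 2 and 3 presumably map to MethodProfiled and everything else to MethodNonProfiled. A sketch under that assumption:

    // Assumed shape of the comp-level -> CodeBlobType mapping; the real
    // accessor, CodeCache::get_code_blob_type(), is not shown in this diff.
    static int code_blob_type_for(int comp_level) {
      if (comp_level == CompLevel_limited_profile ||   // tier 2
          comp_level == CompLevel_full_profile) {      // tier 3
        return CodeBlobType::MethodProfiled;
      }
      return CodeBlobType::MethodNonProfiled;          // tiers 1 and 4, native
    }
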
--- a/hotspot/src/share/vm/code/codeCache.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -44,12 +44,20 @@
 #include "runtime/icache.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/compilationPolicy.hpp"
 #include "services/memoryService.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/xmlstream.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Compilation.hpp"
+#include "c1/c1_Compiler.hpp"
+#endif
+#ifdef COMPILER2
+#include "opto/c2compiler.hpp"
+#include "opto/compile.hpp"
+#endif
 
 // Helper class for printing in CodeCache
-
 class CodeBlob_sizes {
  private:
   int count;
@@ -115,64 +123,215 @@
   }
 };
 
-// CodeCache implementation
+// Iterate over all CodeHeaps
+#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
+// Iterate over all CodeBlobs (cb) on the given CodeHeap
+#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
 
-CodeHeap * CodeCache::_heap = new CodeHeap();
+address CodeCache::_low_bound = 0;
+address CodeCache::_high_bound = 0;
 int CodeCache::_number_of_blobs = 0;
 int CodeCache::_number_of_adapters = 0;
 int CodeCache::_number_of_nmethods = 0;
 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 bool CodeCache::_needs_cache_clean = false;
 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
-
 int CodeCache::_codemem_full_count = 0;
 
-CodeBlob* CodeCache::first() {
-  assert_locked_or_safepoint(CodeCache_lock);
-  return (CodeBlob*)_heap->first();
-}
+// Initialize array of CodeHeaps
+GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
+
+void CodeCache::initialize_heaps() {
+  // Determine size of compiler buffers
+  size_t code_buffers_size = 0;
+#ifdef COMPILER1
+  // C1 temporary code buffers (see Compiler::init_buffer_blob())
+  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
+  code_buffers_size += c1_count * Compiler::code_buffer_size();
+#endif
+#ifdef COMPILER2
+  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
+  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
+  // Initial size of constant table (this may be increased if a compiled method needs more space)
+  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
+#endif
 
+  // Calculate default CodeHeap sizes if not set by user
+  if (!FLAG_IS_CMDLINE(NonMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
+      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
+    // Increase default NonMethodCodeHeapSize to account for compiler buffers
+    FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, NonMethodCodeHeapSize + code_buffers_size);
+
+    // Check if we have enough space for the non-method code heap
+    if (ReservedCodeCacheSize > NonMethodCodeHeapSize) {
+      // Use the default value for NonMethodCodeHeapSize and one half of the
+      // remaining size for non-profiled methods and one half for profiled methods
+      size_t remaining_size = ReservedCodeCacheSize - NonMethodCodeHeapSize;
+      size_t profiled_size = remaining_size / 2;
+      size_t non_profiled_size = remaining_size - profiled_size;
+      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
+      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
+    } else {
+      // Use all space for the non-method heap and set other heaps to minimal size
+      FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
+      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
+      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
+    }
+  }
 
-CodeBlob* CodeCache::next(CodeBlob* cb) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  return (CodeBlob*)_heap->next(cb);
-}
+  // We do not need the profiled CodeHeap; use all its space for the non-profiled CodeHeap
+  if (!heap_available(CodeBlobType::MethodProfiled)) {
+    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
+    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
+  }
+  // We do not need the non-profiled CodeHeap; use all its space for the non-method CodeHeap
+  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
+    FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, NonMethodCodeHeapSize + NonProfiledCodeHeapSize);
+    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
+  }
+
+  // Make sure we have enough space for VM internal code
+  uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
+  if (NonMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
+    vm_exit_during_initialization("Not enough space in non-method code heap to run VM.");
+  }
+  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
+
+  // Align reserved sizes of CodeHeaps
+  size_t non_method_size    = ReservedCodeSpace::allocation_align_size_up(NonMethodCodeHeapSize);
+  size_t profiled_size      = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
+  size_t non_profiled_size  = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
 
+  // Compute initial sizes of CodeHeaps
+  size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
+  size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
+  size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
 
-CodeBlob* CodeCache::alive(CodeBlob *cb) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  while (cb != NULL && !cb->is_alive()) cb = next(cb);
-  return cb;
+  // Reserve one contiguous chunk of memory for CodeHeaps and split it into
+  // parts for the individual heaps. The memory layout looks like this:
+  // ---------- high -----------
+  //    Non-profiled nmethods
+  //      Profiled nmethods
+  //         Non-methods
+  // ---------- low ------------
+  ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
+  ReservedSpace non_method_space    = rs.first_part(non_method_size);
+  ReservedSpace rest                = rs.last_part(non_method_size);
+  ReservedSpace profiled_space      = rest.first_part(profiled_size);
+  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);
+
+  // Non-methods (stubs, adapters, ...)
+  add_heap(non_method_space, "non-methods", init_non_method_size, CodeBlobType::NonMethod);
+  // Tier 2 and tier 3 (profiled) methods
+  add_heap(profiled_space, "profiled nmethods", init_profiled_size, CodeBlobType::MethodProfiled);
+  // Tier 1 and tier 4 (non-profiled) methods and native methods
+  add_heap(non_profiled_space, "non-profiled nmethods", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
 }
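+// Worked example for initialize_heaps() (sizes illustrative, not the actual
+// defaults): with ReservedCodeCacheSize = 240M and NonMethodCodeHeapSize grown
+// ergonomically to, say, 10M, the remaining 230M is split evenly, giving
+// ProfiledCodeHeapSize = 115M and NonProfiledCodeHeapSize = 115M, laid out
+// bottom-up as non-methods, profiled nmethods, non-profiled nmethods.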
 
+ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
+  // Determine alignment
+  const size_t page_size = os::can_execute_large_page_memory() ?
+          os::page_size_for_region(InitialCodeCacheSize, size, 8) :
+          os::vm_page_size();
+  const size_t granularity = os::vm_allocation_granularity();
+  const size_t r_align = MAX2(page_size, granularity);
+  const size_t r_size = align_size_up(size, r_align);
+  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
+    MAX2(page_size, granularity);
 
-nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
-  return (nmethod*)cb;
+  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
+
+  // Initialize bounds
+  _low_bound = (address)rs.base();
+  _high_bound = _low_bound + rs.size();
+
+  return rs;
+}
+
+bool CodeCache::heap_available(int code_blob_type) {
+  if (!SegmentedCodeCache) {
+    // No segmentation: use a single code heap
+    return (code_blob_type == CodeBlobType::All);
+  } else if ((Arguments::mode() == Arguments::_int) ||
+             (TieredStopAtLevel == CompLevel_none)) {
+    // Interpreter only: we don't need any method code heaps
+    return (code_blob_type == CodeBlobType::NonMethod);
+  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
+    // Tiered compilation: use all code heaps
+    return (code_blob_type < CodeBlobType::All);
+  } else {
+    // No TieredCompilation: we only need the non-method and non-profiled code heap
+    return (code_blob_type == CodeBlobType::NonMethod) ||
+           (code_blob_type == CodeBlobType::MethodNonProfiled);
+  }
 }
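+// Summary of heap_available() for common configurations:
+//   -Xint (interpreter only)        -> NonMethod heap only
+//   tiered compilation              -> NonMethod, MethodProfiled and MethodNonProfiled heaps
+//   non-tiered (C1- or C2-only)     -> NonMethod and MethodNonProfiled heaps
+//   -XX:-SegmentedCodeCache         -> a single heap of type All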
 
-nmethod* CodeCache::first_nmethod() {
-  assert_locked_or_safepoint(CodeCache_lock);
-  CodeBlob* cb = first();
-  while (cb != NULL && !cb->is_nmethod()) {
-    cb = next(cb);
+void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
+  // Check if heap is needed
+  if (!heap_available(code_blob_type)) {
+    return;
   }
-  return (nmethod*)cb;
+
+  // Create CodeHeap
+  CodeHeap* heap = new CodeHeap(name, code_blob_type);
+  _heaps->append(heap);
+
+  // Reserve space
+  size_initial = round_to(size_initial, os::vm_page_size());
+
+  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
+    vm_exit_during_initialization("Could not reserve enough space for code cache");
+  }
+
+  // Register the CodeHeap
+  MemoryService::add_code_heap_memory_pool(heap, name);
+}
+
+CodeHeap* CodeCache::get_code_heap(CodeBlob* cb) {
+  assert(cb != NULL, "CodeBlob is null");
+  FOR_ALL_HEAPS(heap) {
+    if ((*heap)->contains(cb)) {
+      return *heap;
+    }
+  }
+  ShouldNotReachHere();
+  return NULL;
 }
 
-nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
+CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
+  FOR_ALL_HEAPS(heap) {
+    if ((*heap)->accepts(code_blob_type)) {
+      return *heap;
+    }
+  }
+  return NULL;
+}
+
+CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
   assert_locked_or_safepoint(CodeCache_lock);
-  cb = next(cb);
-  while (cb != NULL && !cb->is_nmethod()) {
-    cb = next(cb);
-  }
-  return (nmethod*)cb;
+  assert(heap != NULL, "heap is null");
+  return (CodeBlob*)heap->first();
 }
 
-static size_t maxCodeCacheUsed = 0;
+CodeBlob* CodeCache::first_blob(int code_blob_type) {
+  if (heap_available(code_blob_type)) {
+    return first_blob(get_code_heap(code_blob_type));
+  } else {
+    return NULL;
+  }
+}
 
-CodeBlob* CodeCache::allocate(int size, bool is_critical) {
+CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  assert(heap != NULL, "heap is null");
+  return (CodeBlob*)heap->next(cb);
+}
+
+CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
+  return next_blob(get_code_heap(cb), cb);
+}
+
+CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
   // Do not seize the CodeCache lock here--if the caller has not
   // already done so, we are going to lose bigtime, since the code
   // cache will contain a garbage CodeBlob until the caller can
@@ -184,22 +343,34 @@
     return NULL;
   }
   CodeBlob* cb = NULL;
+
+  // Get CodeHeap for the given CodeBlobType
+  CodeHeap* heap = get_code_heap(SegmentedCodeCache ? code_blob_type : CodeBlobType::All);
+  assert(heap != NULL, "heap is null");
+
   while (true) {
-    cb = (CodeBlob*)_heap->allocate(size, is_critical);
+    cb = (CodeBlob*)heap->allocate(size, is_critical);
     if (cb != NULL) break;
-    if (!_heap->expand_by(CodeCacheExpansionSize)) {
+    if (!heap->expand_by(CodeCacheExpansionSize)) {
       // Expansion failed
+      if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonMethod)) {
+        // Fallback solution: Store non-method code in the non-profiled code heap
+        return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
+      }
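+      // E.g. (sketch): with segmentation on, CodeCache::allocate(1024, CodeBlobType::NonMethod)
+      // may thus return memory from the non-profiled code heap if the
+      // non-method heap is full and cannot be expanded.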
       return NULL;
     }
     if (PrintCodeCacheExtension) {
       ResourceMark rm;
-      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
-                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
-                    (address)_heap->high() - (address)_heap->low_boundary());
+      if (SegmentedCodeCache) {
+        tty->print("Code heap '%s'", heap->name());
+      } else {
+        tty->print("Code cache");
+      }
+      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
+                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
+                    (address)heap->high() - (address)heap->low_boundary());
     }
   }
-  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
-                          (address)_heap->low_boundary()) - unallocated_capacity());
   print_trace("allocation", cb, size);
   _number_of_blobs++;
   return cb;
@@ -220,12 +391,12 @@
   }
   _number_of_blobs--;
 
-  _heap->deallocate(cb);
+  // Get heap for given CodeBlob and deallocate
+  get_code_heap(cb)->deallocate(cb);
 
   assert(_number_of_blobs >= 0, "sanity check");
 }
 
-
 void CodeCache::commit(CodeBlob* cb) {
   // this is called by nmethod::nmethod, which must already own CodeCache_lock
   assert_locked_or_safepoint(CodeCache_lock);
@@ -243,89 +414,102 @@
   ICache::invalidate_range(cb->content_begin(), cb->content_size());
 }
 
-
-// Iteration over CodeBlobs
-
-#define FOR_ALL_BLOBS(var)       for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
-#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
-#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
-#define FOR_ALL_NMETHODS(var) for (nmethod *var = first_nmethod(); var != NULL; var = next_nmethod(var))
-
-
 bool CodeCache::contains(void *p) {
   // It should be ok to call contains without holding a lock
-  return _heap->contains(p);
+  FOR_ALL_HEAPS(heap) {
+    if ((*heap)->contains(p)) {
+      return true;
+    }
+  }
+  return false;
 }
 
-
-// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
-// looked up (i.e., one that has been marked for deletion). It only dependes on the _segmap to contain
+// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
+// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
 CodeBlob* CodeCache::find_blob(void* start) {
   CodeBlob* result = find_blob_unsafe(start);
-  if (result == NULL) return NULL;
   // We could potentially look up non_entrant methods
-  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
+  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
   return result;
 }
 
+// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
+// what you are doing)
+CodeBlob* CodeCache::find_blob_unsafe(void* start) {
+  // NMT can walk the stack before code cache is created
+  if (_heaps == NULL || _heaps->is_empty()) return NULL;
+
+  FOR_ALL_HEAPS(heap) {
+    CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
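+    // find_start() returns the enclosing HeapBlock, which can be larger than
+    // the CodeBlob itself (e.g., for intermediate-frame addresses seen by
+    // AsyncGetCallTrace), so verify that the blob really contains start.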
+    if (result != NULL && result->blob_contains((address)start)) {
+      return result;
+    }
+  }
+  return NULL;
+}
+
 nmethod* CodeCache::find_nmethod(void* start) {
-  CodeBlob *cb = find_blob(start);
-  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
+  CodeBlob* cb = find_blob(start);
+  assert(cb->is_nmethod(), "did not find an nmethod");
   return (nmethod*)cb;
 }
 
-
 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_BLOBS(p) {
-    f(p);
+  FOR_ALL_HEAPS(heap) {
+    FOR_ALL_BLOBS(cb, *heap) {
+      f(cb);
+    }
   }
 }
 
-
 void CodeCache::nmethods_do(void f(nmethod* nm)) {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_BLOBS(nm) {
-    if (nm->is_nmethod()) f((nmethod*)nm);
+  NMethodIterator iter;
+  while (iter.next()) {
+    f(iter.method());
   }
 }
 
 void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_NMETHODS(nm) {
-    f(nm);
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    f(iter.method());
   }
 }
 
 int CodeCache::alignment_unit() {
-  return (int)_heap->alignment_unit();
+  return (int)_heaps->first()->alignment_unit();
 }
 
-
 int CodeCache::alignment_offset() {
-  return (int)_heap->alignment_offset();
+  return (int)_heaps->first()->alignment_offset();
 }
 
-
-// Mark nmethods for unloading if they contain otherwise unreachable
-// oops.
+// Mark nmethods for unloading if they contain otherwise unreachable oops.
 void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_NMETHODS(nm) {
-    nm->do_unloading(is_alive, unloading_occurred);
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    iter.method()->do_unloading(is_alive, unloading_occurred);
   }
 }
 
 void CodeCache::blobs_do(CodeBlobClosure* f) {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    f->do_code_blob(cb);
+  FOR_ALL_HEAPS(heap) {
+    FOR_ALL_BLOBS(cb, *heap) {
+      if (cb->is_alive()) {
+        f->do_code_blob(cb);
 
 #ifdef ASSERT
-    if (cb->is_nmethod())
-      ((nmethod*)cb)->verify_scavenge_root_oops();
+        if (cb->is_nmethod())
+          ((nmethod*)cb)->verify_scavenge_root_oops();
 #endif //ASSERT
+      }
+    }
   }
 }
 
@@ -453,44 +637,39 @@
 
 // Temporarily mark nmethods that are claimed to be on the non-perm list.
 void CodeCache::mark_scavenge_root_nmethods() {
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod *nm = (nmethod*)cb;
-      assert(nm->scavenge_root_not_marked(), "clean state");
-      if (nm->on_scavenge_root_list())
-        nm->set_scavenge_root_marked();
-    }
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    nmethod* nm = iter.method();
+    assert(nm->scavenge_root_not_marked(), "clean state");
+    if (nm->on_scavenge_root_list())
+      nm->set_scavenge_root_marked();
   }
 }
 
 // If the closure is given, run it on the unlisted nmethods.
 // Also make sure that the effects of mark_scavenge_root_nmethods is gone.
 void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
-  FOR_ALL_ALIVE_BLOBS(cb) {
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    nmethod* nm = iter.method();
     bool call_f = (f_or_null != NULL);
-    if (cb->is_nmethod()) {
-      nmethod *nm = (nmethod*)cb;
-      assert(nm->scavenge_root_not_marked(), "must be already processed");
-      if (nm->on_scavenge_root_list())
-        call_f = false;  // don't show this one to the client
-      nm->verify_scavenge_root_oops();
-    } else {
-      call_f = false;   // not an nmethod
-    }
-    if (call_f)  f_or_null->do_code_blob(cb);
+    assert(nm->scavenge_root_not_marked(), "must be already processed");
+    if (nm->on_scavenge_root_list())
+      call_f = false;  // don't show this one to the client
+    nm->verify_scavenge_root_oops();
+    if (call_f)  f_or_null->do_code_blob(nm);
   }
 }
 #endif //PRODUCT
 
 void CodeCache::verify_clean_inline_caches() {
 #ifdef ASSERT
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod* nm = (nmethod*)cb;
-      assert(!nm->is_unloaded(), "Tautology");
-      nm->verify_clean_inline_caches();
-      nm->verify();
-    }
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    nmethod* nm = iter.method();
+    assert(!nm->is_unloaded(), "Tautology");
+    nm->verify_clean_inline_caches();
+    nm->verify();
   }
 #endif
 }
@@ -499,10 +678,12 @@
 #ifdef ASSERT
   // make sure that we aren't leaking icholders
   int count = 0;
-  FOR_ALL_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod* nm = (nmethod*)cb;
-      count += nm->verify_icholder_relocations();
+  FOR_ALL_HEAPS(heap) {
+    FOR_ALL_BLOBS(cb, *heap) {
+      if (cb->is_nmethod()) {
+        nmethod* nm = (nmethod*)cb;
+        count += nm->verify_icholder_relocations();
+      }
     }
   }
 
@@ -516,16 +697,15 @@
 
 void CodeCache::gc_epilogue() {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod *nm = (nmethod*)cb;
-      assert(!nm->is_unloaded(), "Tautology");
-      if (needs_cache_clean()) {
-        nm->cleanup_inline_caches();
-      }
-      DEBUG_ONLY(nm->verify());
-      DEBUG_ONLY(nm->verify_oop_relocations());
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    nmethod* nm = iter.method();
+    assert(!nm->is_unloaded(), "Tautology");
+    if (needs_cache_clean()) {
+      nm->cleanup_inline_caches();
     }
+    DEBUG_ONLY(nm->verify());
+    DEBUG_ONLY(nm->verify_oop_relocations());
   }
   set_needs_cache_clean(false);
   prune_scavenge_root_nmethods();
@@ -536,37 +716,89 @@
 void CodeCache::verify_oops() {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   VerifyOopClosure voc;
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod *nm = (nmethod*)cb;
-      nm->oops_do(&voc);
-      nm->verify_oop_relocations();
-    }
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    nmethod* nm = iter.method();
+    nm->oops_do(&voc);
+    nm->verify_oop_relocations();
   }
 }
 
-
-address CodeCache::first_address() {
-  assert_locked_or_safepoint(CodeCache_lock);
-  return (address)_heap->low_boundary();
+size_t CodeCache::capacity() {
+  size_t cap = 0;
+  FOR_ALL_HEAPS(heap) {
+    cap += (*heap)->capacity();
+  }
+  return cap;
 }
 
+size_t CodeCache::unallocated_capacity() {
+  size_t unallocated_cap = 0;
+  FOR_ALL_HEAPS(heap) {
+    unallocated_cap += (*heap)->unallocated_capacity();
+  }
+  return unallocated_cap;
+}
 
-address CodeCache::last_address() {
-  assert_locked_or_safepoint(CodeCache_lock);
-  return (address)_heap->high();
+size_t CodeCache::max_capacity() {
+  size_t max_cap = 0;
+  FOR_ALL_HEAPS(heap) {
+    max_cap += (*heap)->max_capacity();
+  }
+  return max_cap;
 }
 
 /**
- * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
+ * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
+ */
+bool CodeCache::is_full(int* code_blob_type) {
+  FOR_ALL_HEAPS(heap) {
+    if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
+      *code_blob_type = (*heap)->code_blob_type();
+      return true;
+    }
+  }
+  return false;
+}
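+// is_full() is polled by the CompileBroker (see compileBroker.cpp below),
+// which calls handle_full_code_cache(code_blob_type) when any heap runs low.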
+
+/**
+ * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
  * is free, reverse_free_ratio() returns 4.
  */
-double CodeCache::reverse_free_ratio() {
-  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
-  double max_capacity = (double)CodeCache::max_capacity();
+double CodeCache::reverse_free_ratio(int code_blob_type) {
+  CodeHeap* heap = get_code_heap(code_blob_type);
+  if (heap == NULL) {
+    return 0;
+  }
+  double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
+  double max_capacity = (double)heap->max_capacity();
   return max_capacity / unallocated_capacity;
 }
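+// Example: for a 128M code heap with 32M unallocated (ignoring
+// CodeCacheMinimumFreeSpace), reverse_free_ratio() returns 128/32 = 4.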
 
+size_t CodeCache::bytes_allocated_in_freelists() {
+  size_t allocated_bytes = 0;
+  FOR_ALL_HEAPS(heap) {
+    allocated_bytes += (*heap)->allocated_in_freelist();
+  }
+  return allocated_bytes;
+}
+
+int CodeCache::allocated_segments() {
+  int number_of_segments = 0;
+  FOR_ALL_HEAPS(heap) {
+    number_of_segments += (*heap)->allocated_segments();
+  }
+  return number_of_segments;
+}
+
+size_t CodeCache::freelists_length() {
+  size_t length = 0;
+  FOR_ALL_HEAPS(heap) {
+    length += (*heap)->freelist_length();
+  }
+  return length;
+}
+
 void icache_init();
 
 void CodeCache::initialize() {
@@ -579,14 +811,16 @@
   // the code cache to the page size.  In particular, Solaris is moving to a larger
   // default page size.
   CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
-  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
-  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
-  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
-    vm_exit_during_initialization("Could not reserve enough space for code cache");
+
+  if (SegmentedCodeCache) {
+    // Use multiple code heaps
+    initialize_heaps();
+  } else {
+    // Use a single code heap
+    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
+    add_heap(rs, "Code heap", InitialCodeCacheSize, CodeBlobType::All);
   }
 
-  MemoryService::add_code_heap_memory_pool(_heap);
-
   // Initialize ICache flush mechanism
   // This service is needed for os::register_code_area
   icache_init();
@@ -594,10 +828,9 @@
   // Give OS a chance to register generated code area.
   // This is used on Windows 64 bit platforms to register
   // Structured Exception Handlers for our generated code.
-  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
+  os::register_code_area((char*)low_bound(), (char*)high_bound());
 }
 
-
 void codeCache_init() {
   CodeCache::initialize();
 }
@@ -610,8 +843,9 @@
 
 void CodeCache::clear_inline_caches() {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_NMETHODS(nm) {
-    nm->clear_inline_caches();
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    iter.method()->clear_inline_caches();
   }
 }
 
@@ -666,7 +900,9 @@
     }
   }
 
-  FOR_ALL_ALIVE_NMETHODS(nm) {
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    nmethod* nm = iter.method();
     if (nm->is_marked_for_deoptimization()) {
       // ...Already marked in the previous pass; don't count it again.
     } else if (nm->is_evol_dependent_on(dependee())) {
@@ -687,19 +923,22 @@
 // Deoptimize all methods
 void CodeCache::mark_all_nmethods_for_deoptimization() {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  FOR_ALL_ALIVE_NMETHODS(nm) {
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    nmethod* nm = iter.method();
     if (!nm->method()->is_method_handle_intrinsic()) {
       nm->mark_for_deoptimization();
     }
   }
 }
 
-
 int CodeCache::mark_for_deoptimization(Method* dependee) {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   int number_of_marked_CodeBlobs = 0;
 
-  FOR_ALL_ALIVE_NMETHODS(nm) {
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    nmethod* nm = iter.method();
     if (nm->is_dependent_on_method(dependee)) {
       ResourceMark rm;
       nm->mark_for_deoptimization();
@@ -712,7 +951,9 @@
 
 void CodeCache::make_marked_nmethods_zombies() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
-  FOR_ALL_ALIVE_NMETHODS(nm) {
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    nmethod* nm = iter.method();
     if (nm->is_marked_for_deoptimization()) {
 
       // If the nmethod has already been made non-entrant and it can be converted
@@ -733,7 +974,9 @@
 
 void CodeCache::make_marked_nmethods_not_entrant() {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_NMETHODS(nm) {
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    nmethod* nm = iter.method();
     if (nm->is_marked_for_deoptimization()) {
       nm->make_not_entrant();
     }
@@ -741,23 +984,55 @@
 }
 
 void CodeCache::verify() {
-  _heap->verify();
-  FOR_ALL_ALIVE_BLOBS(p) {
-    p->verify();
+  assert_locked_or_safepoint(CodeCache_lock);
+  FOR_ALL_HEAPS(heap) {
+    (*heap)->verify();
+    FOR_ALL_BLOBS(cb, *heap) {
+      if (cb->is_alive()) {
+        cb->verify();
+      }
+    }
   }
 }
 
-void CodeCache::report_codemem_full() {
+// A CodeHeap is full. Print out a warning and report the event.
+void CodeCache::report_codemem_full(int code_blob_type, bool print) {
+  // Get the code heap for the given CodeBlobType (the CodeCacheFull event is built below)
+  CodeHeap* heap = get_code_heap(SegmentedCodeCache ? code_blob_type : CodeBlobType::All);
+
+  if (!heap->was_full() || print) {
+    // Not yet reported for this heap, report
+    heap->report_full();
+    if (SegmentedCodeCache) {
+      warning("CodeHeap for %s is full. Compiler has been disabled.", CodeCache::get_code_heap_name(code_blob_type));
+      warning("Try increasing the code heap size using -XX:%s=",
+          (code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
+    } else {
+      warning("CodeCache is full. Compiler has been disabled.");
+      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
+    }
+    ResourceMark rm;
+    stringStream s;
+    // Dump the code cache into a buffer before locking the tty.
+    {
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      print_summary(&s);
+    }
+    ttyLocker ttyl;
+    tty->print("%s", s.as_string());
+  }
+
   _codemem_full_count++;
   EventCodeCacheFull event;
   if (event.should_commit()) {
-    event.set_startAddress((u8)low_bound());
-    event.set_commitedTopAddress((u8)high());
-    event.set_reservedTopAddress((u8)high_bound());
+    event.set_codeBlobType((u1)code_blob_type);
+    event.set_startAddress((u8)heap->low_boundary());
+    event.set_commitedTopAddress((u8)heap->high());
+    event.set_reservedTopAddress((u8)heap->high_boundary());
     event.set_entryCount(nof_blobs());
     event.set_methodCount(nof_nmethods());
     event.set_adaptorCount(nof_adapters());
-    event.set_unallocatedCapacity(unallocated_capacity()/K);
+    event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
     event.set_fullCount(_codemem_full_count);
     event.commit();
   }
@@ -765,15 +1040,17 @@
 
 void CodeCache::print_memory_overhead() {
   size_t wasted_bytes = 0;
-  CodeBlob *cb;
-  for (cb = first(); cb != NULL; cb = next(cb)) {
-    HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
-    wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
+  FOR_ALL_HEAPS(heap) {
+    CodeHeap* curr_heap = *heap;
+    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
+      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
+      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
+    }
   }
   // Print bytes that are allocated in the freelist
   ttyLocker ttl;
-  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,    freelist_length());
-  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelist()/K);
+  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
+  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
   tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
   tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
 }
@@ -808,43 +1085,48 @@
   int max_nm_size = 0;
   ResourceMark rm;
 
-  CodeBlob *cb;
-  for (cb = first(); cb != NULL; cb = next(cb)) {
-    total++;
-    if (cb->is_nmethod()) {
-      nmethod* nm = (nmethod*)cb;
+  int i = 0;
+  FOR_ALL_HEAPS(heap) {
+    if (SegmentedCodeCache && Verbose) {
+      tty->print_cr("-- Code heap '%s' --", (*heap)->name());
+    }
+    FOR_ALL_BLOBS(cb, *heap) {
+      total++;
+      if (cb->is_nmethod()) {
+        nmethod* nm = (nmethod*)cb;
 
-      if (Verbose && nm->method() != NULL) {
-        ResourceMark rm;
-        char *method_name = nm->method()->name_and_sig_as_C_string();
-        tty->print("%s", method_name);
-        if(nm->is_alive()) { tty->print_cr(" alive"); }
-        if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
-        if(nm->is_zombie()) { tty->print_cr(" zombie"); }
-      }
+        if (Verbose && nm->method() != NULL) {
+          ResourceMark rm;
+          char *method_name = nm->method()->name_and_sig_as_C_string();
+          tty->print("%s", method_name);
+          if (nm->is_alive()) { tty->print_cr(" alive"); }
+          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
+          if (nm->is_zombie()) { tty->print_cr(" zombie"); }
+        }
 
-      nmethodCount++;
+        nmethodCount++;
 
-      if(nm->is_alive()) { nmethodAlive++; }
-      if(nm->is_not_entrant()) { nmethodNotEntrant++; }
-      if(nm->is_zombie()) { nmethodZombie++; }
-      if(nm->is_unloaded()) { nmethodUnloaded++; }
-      if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
+        if (nm->is_alive()) { nmethodAlive++; }
+        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
+        if (nm->is_zombie()) { nmethodZombie++; }
+        if (nm->is_unloaded()) { nmethodUnloaded++; }
+        if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
 
-      if(nm->method() != NULL && nm->is_java_method()) {
-        nmethodJava++;
-        max_nm_size = MAX2(max_nm_size, nm->size());
+        if (nm->method() != NULL && nm->is_java_method()) {
+          nmethodJava++;
+          max_nm_size = MAX2(max_nm_size, nm->size());
+        }
+      } else if (cb->is_runtime_stub()) {
+        runtimeStubCount++;
+      } else if (cb->is_deoptimization_stub()) {
+        deoptimizationStubCount++;
+      } else if (cb->is_uncommon_trap_stub()) {
+        uncommonTrapStubCount++;
+      } else if (cb->is_adapter_blob()) {
+        adapterCount++;
+      } else if (cb->is_buffer_blob()) {
+        bufferBlobCount++;
       }
-    } else if (cb->is_runtime_stub()) {
-      runtimeStubCount++;
-    } else if (cb->is_deoptimization_stub()) {
-      deoptimizationStubCount++;
-    } else if (cb->is_uncommon_trap_stub()) {
-      uncommonTrapStubCount++;
-    } else if (cb->is_adapter_blob()) {
-      adapterCount++;
-    } else if (cb->is_buffer_blob()) {
-      bufferBlobCount++;
     }
   }
 
@@ -853,12 +1135,11 @@
   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
   memset(buckets, 0, sizeof(int) * bucketLimit);
 
-  for (cb = first(); cb != NULL; cb = next(cb)) {
-    if (cb->is_nmethod()) {
-      nmethod* nm = (nmethod*)cb;
-      if(nm->is_java_method()) {
-        buckets[nm->size() / bucketSize]++;
-       }
+  NMethodIterator iter;
+  while (iter.next()) {
+    nmethod* nm = iter.method();
+    if (nm->method() != NULL && nm->is_java_method()) {
+      buckets[nm->size() / bucketSize]++;
     }
   }
 
@@ -902,11 +1183,13 @@
   CodeBlob_sizes live;
   CodeBlob_sizes dead;
 
-  FOR_ALL_BLOBS(p) {
-    if (!p->is_alive()) {
-      dead.add(p);
-    } else {
-      live.add(p);
+  FOR_ALL_HEAPS(heap) {
+    FOR_ALL_BLOBS(cb, *heap) {
+      if (!cb->is_alive()) {
+        dead.add(cb);
+      } else {
+        live.add(cb);
+      }
     }
   }
 
@@ -920,21 +1203,22 @@
     dead.print("dead");
   }
 
-
   if (WizardMode) {
      // print the oop_map usage
     int code_size = 0;
     int number_of_blobs = 0;
     int number_of_oop_maps = 0;
     int map_size = 0;
-    FOR_ALL_BLOBS(p) {
-      if (p->is_alive()) {
-        number_of_blobs++;
-        code_size += p->code_size();
-        OopMapSet* set = p->oop_maps();
-        if (set != NULL) {
-          number_of_oop_maps += set->size();
-          map_size           += set->heap_size();
+    FOR_ALL_HEAPS(heap) {
+      FOR_ALL_BLOBS(cb, *heap) {
+        if (cb->is_alive()) {
+          number_of_blobs++;
+          code_size += cb->code_size();
+          OopMapSet* set = cb->oop_maps();
+          if (set != NULL) {
+            number_of_oop_maps += set->size();
+            map_size           += set->heap_size();
+          }
         }
       }
     }
@@ -949,20 +1233,31 @@
 }
 
 void CodeCache::print_summary(outputStream* st, bool detailed) {
-  size_t total = (_heap->high_boundary() - _heap->low_boundary());
-  st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
-               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
-               total/K, (total - unallocated_capacity())/K,
-               maxCodeCacheUsed/K, unallocated_capacity()/K);
+  FOR_ALL_HEAPS(heap_iterator) {
+    CodeHeap* heap = (*heap_iterator);
+    size_t total = (heap->high_boundary() - heap->low_boundary());
+    if (SegmentedCodeCache) {
+      st->print("CodeHeap '%s':", heap->name());
+    } else {
+      st->print("CodeCache:");
+    }
+    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
+                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
+                 total/K, (total - heap->unallocated_capacity())/K,
+                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
+
+    if (detailed) {
+      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
+                   p2i(heap->low_boundary()),
+                   p2i(heap->high()),
+                   p2i(heap->high_boundary()));
+    }
+  }
 
   if (detailed) {
-    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
-                 p2i(_heap->low_boundary()),
-                 p2i(_heap->high()),
-                 p2i(_heap->high_boundary()));
     st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
-                 " adapters=" UINT32_FORMAT,
-                 nof_blobs(), nof_nmethods(), nof_adapters());
+                       " adapters=" UINT32_FORMAT,
+                       nof_blobs(), nof_nmethods(), nof_adapters());
     st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                  "enabled" : Arguments::mode() == Arguments::_int ?
                  "disabled (interpreter mode)" :
@@ -973,12 +1268,14 @@
 void CodeCache::print_codelist(outputStream* st) {
   assert_locked_or_safepoint(CodeCache_lock);
 
-  FOR_ALL_NMETHODS(p) {
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    nmethod* nm = iter.method();
     ResourceMark rm;
-    char *method_name = p->method()->name_and_sig_as_C_string();
+    char *method_name = nm->method()->name_and_sig_as_C_string();
     st->print_cr("%d %d %s ["INTPTR_FORMAT", "INTPTR_FORMAT" - "INTPTR_FORMAT"]",
-                 p->compile_id(), p->comp_level(), method_name, (intptr_t)p->header_begin(),
-                 (intptr_t)p->code_begin(), (intptr_t)p->code_end());
+                 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(),
+                 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
   }
 }
 
@@ -995,4 +1292,3 @@
             nof_blobs(), nof_nmethods(), nof_adapters(),
             unallocated_capacity());
 }
-
--- a/hotspot/src/share/vm/code/codeCache.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -26,105 +26,117 @@
 #define SHARE_VM_CODE_CODECACHE_HPP
 
 #include "code/codeBlob.hpp"
+#include "code/nmethod.hpp"
 #include "memory/allocation.hpp"
 #include "memory/heap.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/oopsHierarchy.hpp"
+#include "runtime/mutexLocker.hpp"
 
 // The CodeCache implements the code cache for various pieces of generated
 // code, e.g., compiled java methods, runtime stubs, transition frames, etc.
 // The entries in the CodeCache are all CodeBlob's.
 
-// Implementation:
-//   - Each CodeBlob occupies one chunk of memory.
-//   - Like the offset table in oldspace the zone has at table for
-//     locating a method given a addess of an instruction.
+// -- Implementation --
+// The CodeCache consists of one or more CodeHeaps, each of which contains
+// CodeBlobs of a specific CodeBlobType. Currently heaps for the following
+// types are available:
+//  - Non-methods: non-method code such as buffer blobs, adapters and runtime stubs
+//  - Profiled nmethods: nmethods that are profiled, i.e., those
+//    executed at level 2 or 3
+//  - Non-Profiled nmethods: nmethods that are not profiled, i.e., those
+//    executed at level 1 or 4 and native methods
+//  - All: Used for code of all types if code cache segmentation is disabled.
+//
+// In the rare case of the non-method code heap getting full, non-method code
+// will be stored in the non-profiled code heap as a fallback solution.
+//
+// Depending on the availability of compilers and TieredCompilation there
+// may be fewer heaps. The size of the code heaps depends on the values of
+// ReservedCodeCacheSize, NonProfiledCodeHeapSize and ProfiledCodeHeapSize
+// (see CodeCache::heap_available(..) and CodeCache::initialize_heaps(..)
+// for details).
+//
+// Code cache segmentation is controlled by the flag SegmentedCodeCache.
+// If turned off, all code types are stored in a single code heap. By default
+// code cache segmentation is turned on if TieredCompilation is enabled and
+// ReservedCodeCacheSize >= 240 MB.
+//
+// All methods of the CodeCache accepting a CodeBlobType only apply to
+// CodeBlobs of the given type. For example, iteration over the
+// CodeBlobs of a specific type can be done by using CodeCache::first_blob(..)
+// and CodeCache::next_blob(..) and providing the corresponding CodeBlobType.
+//
+// IMPORTANT: If you add new CodeHeaps to the code cache or change the
+// existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
+// Solaris and BSD.
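+//
+// Example of an explicitly sized segmented code cache (sizes illustrative):
+//   -XX:+SegmentedCodeCache -XX:ReservedCodeCacheSize=248m
+//   -XX:NonMethodCodeHeapSize=8m -XX:ProfiledCodeHeapSize=120m
+//   -XX:NonProfiledCodeHeapSize=120m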
 
 class OopClosure;
 class DepChange;
 
 class CodeCache : AllStatic {
   friend class VMStructs;
+  friend class NMethodIterator;
  private:
-  // CodeHeap is malloc()'ed at startup and never deleted during shutdown,
-  // so that the generated assembly code is always there when it's needed.
-  // This may cause memory leak, but is necessary, for now. See 4423824,
-  // 4422213 or 4436291 for details.
-  static CodeHeap * _heap;
-  static int _number_of_blobs;
-  static int _number_of_adapters;
-  static int _number_of_nmethods;
-  static int _number_of_nmethods_with_dependencies;
-  static bool _needs_cache_clean;
-  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
+  // CodeHeaps of the cache
+  static GrowableArray<CodeHeap*>* _heaps;
+
+  static address _low_bound;                            // Lower bound of CodeHeap addresses
+  static address _high_bound;                           // Upper bound of CodeHeap addresses
+  static int _number_of_blobs;                          // Total number of CodeBlobs in the cache
+  static int _number_of_adapters;                       // Total number of Adapters in the cache
+  static int _number_of_nmethods;                       // Total number of nmethods in the cache
+  static int _number_of_nmethods_with_dependencies;     // Total number of nmethods with dependencies
+  static bool _needs_cache_clean;                       // True if inline caches of the nmethods need to be flushed
+  static nmethod* _scavenge_root_nmethods;              // linked via nm->scavenge_root_link()
+  static int _codemem_full_count;                       // Number of times a CodeHeap in the cache was full
 
   static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
   static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
 
-  static int _codemem_full_count;
-  static size_t bytes_allocated_in_freelist() { return _heap->allocated_in_freelist(); }
-  static int    allocated_segments()          { return _heap->allocated_segments(); }
-  static size_t freelist_length()             { return _heap->freelist_length(); }
+  // CodeHeap management
+  static void initialize_heaps();                             // Initializes the CodeHeaps
+  // Creates a new heap with the given name and size, containing CodeBlobs of the given type
+  static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
+  static CodeHeap* get_code_heap(CodeBlob* cb);               // Returns the CodeHeap for the given CodeBlob
+  static CodeHeap* get_code_heap(int code_blob_type);         // Returns the CodeHeap for the given CodeBlobType
+  static bool heap_available(int code_blob_type);             // Returns true if a CodeHeap for the given CodeBlobType is available
+  static ReservedCodeSpace reserve_heap_memory(size_t size);  // Reserves one contiguous chunk of memory for the CodeHeaps
+
+  // Iteration
+  static CodeBlob* first_blob(CodeHeap* heap);                // Returns the first CodeBlob on the given CodeHeap
+  static CodeBlob* first_blob(int code_blob_type);            // Returns the first CodeBlob of the given type
+  static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb);   // Returns the CodeBlob following cb on the given CodeHeap
+  static CodeBlob* next_blob(CodeBlob* cb);                   // Returns the CodeBlob following cb on the CodeHeap containing cb
+
+  static size_t bytes_allocated_in_freelists();
+  static int    allocated_segments();
+  static size_t freelists_length();
 
  public:
-
   // Initialization
   static void initialize();
 
-  static void report_codemem_full();
-
   // Allocation/administration
-  static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
-  static void commit(CodeBlob* cb);                 // called when the allocated CodeBlob has been filled
-  static int alignment_unit();                      // guaranteed alignment of all CodeBlobs
-  static int alignment_offset();                    // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
-  static void free(CodeBlob* cb);                   // frees a CodeBlob
-  static bool contains(void *p);                    // returns whether p is included
-  static void blobs_do(void f(CodeBlob* cb));       // iterates over all CodeBlobs
-  static void blobs_do(CodeBlobClosure* f);         // iterates over all CodeBlobs
-  static void nmethods_do(void f(nmethod* nm));     // iterates over all nmethods
-  static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
+  static CodeBlob* allocate(int size, int code_blob_type, bool is_critical = false); // allocates a new CodeBlob
+  static void commit(CodeBlob* cb);                     // called when the allocated CodeBlob has been filled
+  static int  alignment_unit();                         // guaranteed alignment of all CodeBlobs
+  static int  alignment_offset();                       // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
+  static void free(CodeBlob* cb);                       // frees a CodeBlob
+  static bool contains(void *p);                        // returns whether p is included
+  static void blobs_do(void f(CodeBlob* cb));           // iterates over all CodeBlobs
+  static void blobs_do(CodeBlobClosure* f);             // iterates over all CodeBlobs
+  static void nmethods_do(void f(nmethod* nm));         // iterates over all nmethods
+  static void alive_nmethods_do(void f(nmethod* nm));   // iterates over all alive nmethods
 
   // Lookup
-  static CodeBlob* find_blob(void* start);
-  static nmethod*  find_nmethod(void* start);
-
-  // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
-  // what you are doing)
-  static CodeBlob* find_blob_unsafe(void* start) {
-    // NMT can walk the stack before code cache is created
-    if (_heap == NULL) return NULL;
+  static CodeBlob* find_blob(void* start);              // Returns the CodeBlob containing the given address
+  static CodeBlob* find_blob_unsafe(void* start);       // Same as find_blob but does not fail if looking up a zombie method
+  static nmethod*  find_nmethod(void* start);           // Returns the nmethod containing the given address
 
-    CodeBlob* result = (CodeBlob*)_heap->find_start(start);
-    // this assert is too strong because the heap code will return the
-    // heapblock containing start. That block can often be larger than
-    // the codeBlob itself. If you look up an address that is within
-    // the heapblock but not in the codeBlob you will assert.
-    //
-    // Most things will not lookup such bad addresses. However
-    // AsyncGetCallTrace can see intermediate frames and get that kind
-    // of invalid address and so can a developer using hsfind.
-    //
-    // The more correct answer is to return NULL if blob_contains() returns
-    // false.
-    // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");
-
-    if (result != NULL && !result->blob_contains((address)start)) {
-      result = NULL;
-    }
-    return result;
-  }
-
-  // Iteration
-  static CodeBlob* first();
-  static CodeBlob* next (CodeBlob* cb);
-  static CodeBlob* alive(CodeBlob *cb);
-  static nmethod* alive_nmethod(CodeBlob *cb);
-  static nmethod* first_nmethod();
-  static nmethod* next_nmethod (CodeBlob* cb);
-  static int       nof_blobs()                 { return _number_of_blobs; }
-  static int       nof_adapters()              { return _number_of_adapters; }
-  static int       nof_nmethods()              { return _number_of_nmethods; }
+  static int       nof_blobs()      { return _number_of_blobs; }      // Returns the total number of CodeBlobs in the cache
+  static int       nof_adapters()   { return _number_of_adapters; }   // Returns the total number of Adapters in the cache
+  static int       nof_nmethods()   { return _number_of_nmethods; }   // Returns the total number of nmethods in the cache
 
   // GC support
   static void gc_epilogue();
@@ -137,7 +149,7 @@
   static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
   static void scavenge_root_nmethods_do(CodeBlobClosure* f);
 
-  static nmethod* scavenge_root_nmethods()          { return _scavenge_root_nmethods; }
+  static nmethod* scavenge_root_nmethods()            { return _scavenge_root_nmethods; }
   static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
   static void add_scavenge_root_nmethod(nmethod* nm);
   static void drop_scavenge_root_nmethod(nmethod* nm);
@@ -151,27 +163,47 @@
   static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
   static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
   static void log_state(outputStream* st);
+  static const char* get_code_heap_name(int code_blob_type)  { return (heap_available(code_blob_type) ? get_code_heap(code_blob_type)->name() : "Unused"); }
+  static void report_codemem_full(int code_blob_type, bool print);
 
   // Dcmd (Diagnostic commands)
   static void print_codelist(outputStream* st);
   static void print_layout(outputStream* st);
 
   // The full limits of the codeCache
-  static address  low_bound()                    { return (address) _heap->low_boundary(); }
-  static address  high_bound()                   { return (address) _heap->high_boundary(); }
-  static address  high()                         { return (address) _heap->high(); }
+  static address low_bound()                          { return _low_bound; }
+  static address high_bound()                         { return _high_bound; }
 
   // Profiling
-  static address first_address();                // first address used for CodeBlobs
-  static address last_address();                 // last  address used for CodeBlobs
-  static size_t  capacity()                      { return _heap->capacity(); }
-  static size_t  max_capacity()                  { return _heap->max_capacity(); }
-  static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
-  static double  reverse_free_ratio();
+  static size_t capacity(int code_blob_type)             { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->capacity() : 0; }
+  static size_t capacity();
+  static size_t unallocated_capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->unallocated_capacity() : 0; }
+  static size_t unallocated_capacity();
+  static size_t max_capacity(int code_blob_type)         { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->max_capacity() : 0; }
+  static size_t max_capacity();
+
+  static bool   is_full(int* code_blob_type);
+  static double reverse_free_ratio(int code_blob_type);
+
+  static bool needs_cache_clean()                     { return _needs_cache_clean; }
+  static void set_needs_cache_clean(bool v)           { _needs_cache_clean = v;    }
+  static void clear_inline_caches();                  // clear all inline caches
 
-  static bool needs_cache_clean()                { return _needs_cache_clean; }
-  static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
-  static void clear_inline_caches();             // clear all inline caches
+  // Returns the CodeBlobType for nmethods of the given compilation level
+  static int get_code_blob_type(int comp_level) {
+    if (comp_level == CompLevel_none ||
+        comp_level == CompLevel_simple ||
+        comp_level == CompLevel_full_optimization) {
+      // Non-profiled methods
+      return CodeBlobType::MethodNonProfiled;
+    } else if (comp_level == CompLevel_limited_profile ||
+               comp_level == CompLevel_full_profile) {
+      // Profiled methods
+      return CodeBlobType::MethodProfiled;
+    }
+    ShouldNotReachHere();
+    return 0;
+  }
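+  // E.g., get_code_blob_type(CompLevel_full_profile) returns
+  // CodeBlobType::MethodProfiled, so tier 3 methods are allocated in the
+  // profiled code heap (see nmethod::operator new in nmethod.cpp).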
 
   static void verify_clean_inline_caches();
   static void verify_icholder_relocations();
@@ -187,10 +219,87 @@
   static void make_marked_nmethods_zombies();
   static void make_marked_nmethods_not_entrant();
 
-    // tells how many nmethods have dependencies
+  // tells how many nmethods have dependencies
   static int number_of_nmethods_with_dependencies();
 
   static int get_codemem_full_count() { return _codemem_full_count; }
 };
 
+
+// Iterator to iterate over nmethods in the CodeCache.
+class NMethodIterator : public StackObj {
+ private:
+  CodeBlob* _code_blob;   // Current CodeBlob
+  int _code_blob_type;    // Refers to current CodeHeap
+
+ public:
+  NMethodIterator() {
+    initialize(NULL); // Set to NULL, initialized by first call to next()
+  }
+
+  NMethodIterator(nmethod* nm) {
+    initialize(nm);
+  }
+
+  // Advance iterator to next nmethod
+  bool next() {
+    assert_locked_or_safepoint(CodeCache_lock);
+    assert(_code_blob_type < CodeBlobType::NumTypes, "end reached");
+
+    bool result = next_nmethod();
+    while (!result && (_code_blob_type < CodeBlobType::MethodProfiled)) {
+      // Advance to next code heap if segmented code cache
+      _code_blob_type++;
+      result = next_nmethod();
+    }
+    return result;
+  }
+
+  // Advance iterator to next alive nmethod
+  bool next_alive() {
+    bool result = next();
+    while (result && !_code_blob->is_alive()) {
+      result = next();
+    }
+    return result;
+  }
+
+  bool end()        const   { return _code_blob == NULL; }
+  nmethod* method() const   { return (nmethod*)_code_blob; }
+
+ private:
+  // Initialize iterator to given nmethod
+  void initialize(nmethod* nm) {
+    _code_blob = (CodeBlob*)nm;
+    if (!SegmentedCodeCache) {
+      // Iterate over all CodeBlobs
+      _code_blob_type = CodeBlobType::All;
+    } else if (nm != NULL) {
+      _code_blob_type = CodeCache::get_code_blob_type(nm->comp_level());
+    } else {
+      // Only iterate over method code heaps, starting with non-profiled
+      _code_blob_type = CodeBlobType::MethodNonProfiled;
+    }
+  }
+
+  // Advance iterator to the next nmethod in the current code heap
+  bool next_nmethod() {
+    // Get first method CodeBlob
+    if (_code_blob == NULL) {
+      _code_blob = CodeCache::first_blob(_code_blob_type);
+      if (_code_blob == NULL) {
+        return false;
+      } else if (_code_blob->is_nmethod()) {
+        return true;
+      }
+    }
+    // Search for next method CodeBlob
+    _code_blob = CodeCache::next_blob(_code_blob);
+    while (_code_blob != NULL && !_code_blob->is_nmethod()) {
+      _code_blob = CodeCache::next_blob(_code_blob);
+    }
+    return _code_blob != NULL;
+  }
+};
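+// Typical usage of NMethodIterator (caller must hold the CodeCache_lock or be
+// at a safepoint), as in CodeCache::nmethods_do():
+//   NMethodIterator iter;
+//   while (iter.next_alive()) {
+//     nmethod* nm = iter.method();
+//     // ... process nm ...
+//   }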
+
 #endif // SHARE_VM_CODE_CODECACHE_HPP
--- a/hotspot/src/share/vm/code/nmethod.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -500,7 +500,7 @@
     CodeOffsets offsets;
     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
-    nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
+    nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
                                             compile_id, &offsets,
                                             code_buffer, frame_size,
                                             basic_lock_owner_sp_offset,
@@ -538,7 +538,7 @@
     offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 
-    nm = new (nmethod_size) nmethod(method(), nmethod_size,
+    nm = new (nmethod_size, CompLevel_none) nmethod(method(), nmethod_size,
                                     &offsets, code_buffer, frame_size);
 
     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
@@ -586,7 +586,7 @@
       + round_to(nul_chk_table->size_in_bytes(), oopSize)
       + round_to(debug_info->data_size()       , oopSize);
 
-    nm = new (nmethod_size)
+    nm = new (nmethod_size, comp_level)
     nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
             orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
             oop_maps,
@@ -803,9 +803,11 @@
 }
 #endif // def HAVE_DTRACE_H
 
-void* nmethod::operator new(size_t size, int nmethod_size) throw() {
-  // Not critical, may return null if there is too little continuous memory
-  return CodeCache::allocate(nmethod_size);
+void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw() {
+  // With a SegmentedCodeCache, nmethods are allocated on separate heaps and therefore do not share memory
+  // with critical CodeBlobs. We define the allocation as critical to make sure all code heap memory is used.
+  bool is_critical = SegmentedCodeCache;
+  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), is_critical);
 }
 
 nmethod::nmethod(
@@ -1530,7 +1532,7 @@
   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
   if (PrintMethodFlushing) {
     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
-        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
+        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
   }
 
   // We need to deallocate any ExceptionCache data.
@@ -1557,7 +1559,6 @@
   CodeCache::free(this);
 }
 
-
 //
 // Notify all classes this nmethod is dependent on that it is no
 // longer dependent. This should only be called in two situations.
@@ -2418,15 +2419,18 @@
   // Turn off dependency tracing while actually testing dependencies.
   NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
 
- typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
-                           &DependencySignature::equals, 11027> DepTable;
-
- DepTable* table = new DepTable();
+  typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
+                            &DependencySignature::equals, 11027> DepTable;
+
+  DepTable* table = new DepTable();
 
   // Iterate over live nmethods and check dependencies of all nmethods that are not
   // marked for deoptimization. A particular dependency is only checked once.
-  for(nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); nm != NULL; nm = CodeCache::alive_nmethod(CodeCache::next(nm))) {
-    if (!nm->is_marked_for_deoptimization()) {
+  NMethodIterator iter;
+  while (iter.next()) {
+    nmethod* nm = iter.method();
+    // Only check dependencies of live nmethods
+    if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
       for (Dependencies::DepStream deps(nm); deps.next(); ) {
         // Construct abstraction of a dependency.
         DependencySignature* current_sig = new DependencySignature(deps);
--- a/hotspot/src/share/vm/code/nmethod.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -288,7 +288,7 @@
           int comp_level);
 
   // helper methods
-  void* operator new(size_t size, int nmethod_size) throw();
+  void* operator new(size_t size, int nmethod_size, int comp_level) throw();
 
   const char* reloc_string_for(u_char* begin, u_char* end);
   // Returns true if this thread changed the state of the nmethod or
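
The new comp_level parameter on nmethod::operator new is what steers each nmethod into the matching code heap via CodeCache::get_code_blob_type. A minimal standalone sketch of that routing, assuming the usual tiered levels (the profiling C1 levels go to the profiled heap, everything else to the non-profiled heap); the enum values below are illustrative stand-ins, not HotSpot's definitions:

    #include <cstdio>

    // Illustrative stand-ins for HotSpot's CompLevel and CodeBlobType values.
    enum CompLevel {
      CompLevel_none              = 0,  // interpreter / native wrappers
      CompLevel_simple            = 1,  // C1 without profiling
      CompLevel_limited_profile   = 2,  // C1 with limited profiling
      CompLevel_full_profile      = 3,  // C1 with full profiling
      CompLevel_full_optimization = 4   // C2
    };

    enum CodeBlobType {
      MethodNonProfiled = 0,  // fully optimized or non-profiled methods
      MethodProfiled    = 1,  // methods that still collect profiles
      NonMethod         = 2   // buffers, adapters, stubs
    };

    // Sketch of the comp_level -> code heap routing the patch relies on.
    static int get_code_blob_type(int comp_level) {
      if (comp_level == CompLevel_limited_profile ||
          comp_level == CompLevel_full_profile) {
        return MethodProfiled;
      }
      return MethodNonProfiled;  // CompLevel_none, _simple, _full_optimization
    }

    int main() {
      for (int level = CompLevel_none; level <= CompLevel_full_optimization; level++) {
        printf("comp_level %d -> code blob type %d\n", level, get_code_blob_type(level));
      }
      return 0;
    }
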
--- a/hotspot/src/share/vm/code/vtableStubs.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -63,7 +63,7 @@
    // If changing the name, update the other file accordingly.
     BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
-      CompileBroker::handle_full_code_cache();
+      CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
       return NULL;
     }
     _chunk = blob->content_begin();
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -1747,9 +1747,11 @@
     // We need this HandleMark to avoid leaking VM handles.
     HandleMark hm(thread);
 
-    if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
-      // the code cache is really full
-      handle_full_code_cache();
+    // Check if the CodeCache is full
+    int code_blob_type = 0;
+    if (CodeCache::is_full(&code_blob_type)) {
+      // The CodeHeap for code_blob_type is really full
+      handle_full_code_cache(code_blob_type);
     }
 
     CompileTask* task = queue->get();
@@ -2079,7 +2081,7 @@
  * The CodeCache is full.  Print out warning and disable compilation
  * or try code cache cleaning so compilation can continue later.
  */
-void CompileBroker::handle_full_code_cache() {
+void CompileBroker::handle_full_code_cache(int code_blob_type) {
   UseInterpreter = true;
   if (UseCompiler || AlwaysCompileLoopMethods ) {
     if (xtty != NULL) {
@@ -2096,8 +2098,6 @@
       xtty->end_elem();
     }
 
-    CodeCache::report_codemem_full();
-
 #ifndef PRODUCT
     if (CompileTheWorld || ExitOnFullCodeCache) {
       codecache_print(/* detailed= */ true);
@@ -2119,12 +2119,7 @@
       disable_compilation_forever();
     }
 
-    // Print warning only once
-    if (should_print_compiler_warning()) {
-      warning("CodeCache is full. Compiler has been disabled.");
-      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
-      codecache_print(/* detailed= */ true);
-    }
+    CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning());
   }
 }
 
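
With one heap per blob type, a single unallocated_capacity() < CodeCacheMinimumFreeSpace test no longer suffices; CodeCache::is_full(&code_blob_type) has to probe each heap and report which one ran out of space so handle_full_code_cache can react per heap. A hedged sketch of that check, assuming a flat list of heaps and the 500K minimum-free threshold from globals.hpp; the structures here are illustrative:

    #include <cstdio>
    #include <vector>

    struct CodeHeapInfo {
      int    code_blob_type;        // CodeBlobType stored in this heap
      size_t unallocated_capacity;  // free bytes left
    };

    const size_t kMinimumFreeSpace = 500 * 1024;  // CodeCacheMinimumFreeSpace default

    // Roughly what CodeCache::is_full() must decide: is any heap out of space,
    // and if so, which blob type does it serve?
    bool code_cache_is_full(const std::vector<CodeHeapInfo>& heaps, int* code_blob_type) {
      for (const CodeHeapInfo& heap : heaps) {
        if (heap.unallocated_capacity < kMinimumFreeSpace) {
          *code_blob_type = heap.code_blob_type;
          return true;
        }
      }
      return false;
    }

    int main() {
      std::vector<CodeHeapInfo> heaps = { {0, 4 * 1024 * 1024}, {1, 100 * 1024} };
      int type = 0;
      if (code_cache_is_full(heaps, &type)) {
        printf("code heap for blob type %d is full\n", type);  // blob type 1
      }
      return 0;
    }
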
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -434,7 +434,7 @@
   static bool is_compilation_disabled_forever() {
     return _should_compile_new_jobs == shutdown_compilaton;
   }
-  static void handle_full_code_cache();
+  static void handle_full_code_cache(int code_blob_type);
   // Ensures that warning is only printed once.
   static bool should_print_compiler_warning() {
     jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -5095,7 +5095,11 @@
       _num_entered_barrier(0)
   {
     nmethod::increase_unloading_clock();
-    _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
+    // Get first alive nmethod
+    NMethodIterator iter;
+    if (iter.next_alive()) {
+      _first_nmethod = iter.method();
+    }
     _claimed_nmethod = (volatile nmethod*)_first_nmethod;
   }
 
@@ -5138,27 +5142,26 @@
 
   void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
     nmethod* first;
-    nmethod* last;
+    NMethodIterator last;
 
     do {
       *num_claimed_nmethods = 0;
 
-      first = last = (nmethod*)_claimed_nmethod;
+      first = (nmethod*)_claimed_nmethod;
+      last = NMethodIterator(first);
 
       if (first != NULL) {
+
         for (int i = 0; i < MaxClaimNmethods; i++) {
-          last = CodeCache::alive_nmethod(CodeCache::next(last));
-
-          if (last == NULL) {
+          if (!last.next_alive()) {
             break;
           }
-
-          claimed_nmethods[i] = last;
+          claimed_nmethods[i] = last.method();
           (*num_claimed_nmethods)++;
         }
       }
 
-    } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
+    } while ((nmethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
   }
 
   nmethod* claim_postponed_nmethod() {
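
The rewritten claim_nmethods keeps the original lock-free protocol: read the shared cursor, walk a private iterator forward by at most MaxClaimNmethods entries, then publish the new position with a compare-and-swap, retrying the whole batch if another worker moved the cursor first. A condensed sketch of that loop over a plain linked list, with std::atomic standing in for Atomic::cmpxchg_ptr; all names are illustrative:

    #include <atomic>
    #include <cstdio>

    const int MaxClaim = 8;

    struct Node { Node* next; int id; };

    std::atomic<Node*> g_cursor;  // shared scan position, like _claimed_nmethod

    // Claim up to MaxClaim nodes after the shared cursor; retry on CAS failure
    // because another worker published a newer cursor in the meantime.
    int claim_batch(Node* out[]) {
      Node* first;
      Node* last;
      int n;
      do {
        n = 0;
        first = g_cursor.load();
        last = first;
        if (first != nullptr) {
          for (int i = 0; i < MaxClaim; i++) {
            last = last->next;        // advance past the already-claimed node
            if (last == nullptr) break;
            out[n++] = last;
          }
        }
      } while (!g_cursor.compare_exchange_weak(first, last));
      return n;
    }

    int main() {
      Node nodes[20];
      for (int i = 0; i < 20; i++) { nodes[i] = { i + 1 < 20 ? &nodes[i + 1] : nullptr, i }; }
      g_cursor = &nodes[0];
      Node* batch[MaxClaim];
      int n = claim_batch(batch);
      printf("claimed %d nodes, first id %d\n", n, batch[0]->id);  // 8 nodes, id 1
      return 0;
    }
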
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -1077,7 +1077,7 @@
 address SignatureHandlerLibrary::set_handler_blob() {
   BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
   if (handler_blob == NULL) {
-    CompileBroker::handle_full_code_cache();
+    CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
     return NULL;
   }
   address handler = handler_blob->code_begin();
--- a/hotspot/src/share/vm/memory/heap.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/heap.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -35,7 +35,9 @@
 
 // Implementation of Heap
 
-CodeHeap::CodeHeap() {
+CodeHeap::CodeHeap(const char* name, const int code_blob_type)
+  : _code_blob_type(code_blob_type) {
+  _name                         = name;
   _number_of_committed_segments = 0;
   _number_of_reserved_segments  = 0;
   _segment_size                 = 0;
@@ -44,6 +46,8 @@
   _freelist                     = NULL;
   _freelist_segments            = 0;
   _freelist_length              = 0;
+  _max_allocated_capacity       = 0;
+  _was_full                     = false;
 }
 
 
@@ -88,9 +92,8 @@
 }
 
 
-bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
-                       size_t segment_size) {
-  assert(reserved_size >= committed_size, "reserved < committed");
+bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
+  assert(rs.size() >= committed_size, "reserved < committed");
   assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
   assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
 
@@ -99,17 +102,12 @@
 
   // Reserve and initialize space for _memory.
   const size_t page_size = os::can_execute_large_page_memory() ?
-          os::page_size_for_region(committed_size, reserved_size, 8) :
+          os::page_size_for_region(committed_size, rs.size(), 8) :
           os::vm_page_size();
   const size_t granularity = os::vm_allocation_granularity();
-  const size_t r_align = MAX2(page_size, granularity);
-  const size_t r_size = align_size_up(reserved_size, r_align);
   const size_t c_size = align_size_up(committed_size, page_size);
 
-  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
-    MAX2(page_size, granularity);
-  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
-  os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
+  os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
                        rs.base(), rs.size());
   if (!_memory.initialize(rs, c_size)) {
     return false;
@@ -182,6 +180,7 @@
     assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
     assert(!block->free(), "must be marked free");
     DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
+    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
     return block->allocated_space();
   }
 
@@ -203,6 +202,7 @@
     b->initialize(number_of_segments);
     _next_segment += number_of_segments;
     DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
+    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
     return b->allocated_space();
   } else {
     return NULL;
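
_max_allocated_capacity is a classic high-water mark: each successful allocation compares current usage against the recorded peak, so later frees never erase the information that the heap was once that full (which is what sizing diagnostics want). A tiny sketch of the pattern on a byte-granular bump allocator, ignoring HotSpot's segment bookkeeping:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    class BumpHeap {
      size_t _capacity;
      size_t _allocated;
      size_t _max_allocated;  // peak usage over the heap's lifetime
     public:
      explicit BumpHeap(size_t capacity)
        : _capacity(capacity), _allocated(0), _max_allocated(0) {}

      bool allocate(size_t size) {
        if (_allocated + size > _capacity) return false;  // heap full
        _allocated += size;
        // Same idea as _max_allocated_capacity = MAX2(...): record the peak.
        _max_allocated = std::max(_max_allocated, _allocated);
        return true;
      }
      void free(size_t size)       { _allocated -= size; }
      size_t max_allocated() const { return _max_allocated; }
    };

    int main() {
      BumpHeap heap(1024);
      heap.allocate(600);
      heap.free(400);
      heap.allocate(100);
      printf("peak = %zu bytes\n", heap.max_allocated());  // 600, not 300
      return 0;
    }
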
--- a/hotspot/src/share/vm/memory/heap.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/heap.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_MEMORY_HEAP_HPP
 #define SHARE_VM_MEMORY_HEAP_HPP
 
+#include "code/codeBlob.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/virtualspace.hpp"
 
@@ -93,6 +94,11 @@
   FreeBlock*   _freelist;
   size_t       _freelist_segments;               // No. of segments in freelist
   int          _freelist_length;
+  size_t       _max_allocated_capacity;          // Peak capacity that was allocated during lifetime of the heap
+
+  const char*  _name;                            // Name of the CodeHeap
+  const int    _code_blob_type;                  // CodeBlobType it contains
+  bool         _was_full;                        // True if the code heap was full
 
   enum { free_sentinel = 0xFF };
 
@@ -127,10 +133,10 @@
   void clear();                                 // clears all heap contents
 
  public:
-  CodeHeap();
+  CodeHeap(const char* name, const int code_blob_type);
 
   // Heap extents
-  bool  reserve(size_t reserved_size, size_t committed_size, size_t segment_size);
+  bool  reserve(ReservedSpace rs, size_t committed_size, size_t segment_size);
   bool  expand_by(size_t size);                  // expands committed memory by size
 
   // Memory allocation
@@ -161,8 +167,18 @@
   size_t max_capacity() const;
   int    allocated_segments() const;
   size_t allocated_capacity() const;
+  size_t max_allocated_capacity() const          { return _max_allocated_capacity; }
   size_t unallocated_capacity() const            { return max_capacity() - allocated_capacity(); }
 
+  // Returns true if the CodeHeap contains CodeBlobs of the given type
+  bool accepts(int code_blob_type) const         { return (_code_blob_type == code_blob_type); }
+  int code_blob_type() const                     { return _code_blob_type; }
+
+  // Debugging / Profiling
+  const char* name() const                       { return _name; }
+  bool was_full()                                { return _was_full; }
+  void report_full()                             { _was_full = true; }
+
 private:
   size_t heap_unallocated_capacity() const;
 
--- a/hotspot/src/share/vm/opto/c2compiler.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/c2compiler.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -24,7 +24,9 @@
 
 #include "precompiled.hpp"
 #include "opto/c2compiler.hpp"
+#include "opto/compile.hpp"
 #include "opto/optoreg.hpp"
+#include "opto/output.hpp"
 #include "opto/runtime.hpp"
 
 // register information defined by ADLC
@@ -147,3 +149,8 @@
 void C2Compiler::print_timers() {
   // do nothing
 }
+
+int C2Compiler::initial_code_buffer_size() {
+  assert(SegmentedCodeCache, "Should only be used with a segmented code cache");
+  return Compile::MAX_inst_size + Compile::MAX_locs_size + initial_const_capacity;
+}
--- a/hotspot/src/share/vm/opto/c2compiler.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/c2compiler.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -50,6 +50,9 @@
 
   // Print compilation timers and statistics
   void print_timers();
+
+  // Initial size of the code buffer (may be increased at runtime)
+  static int initial_code_buffer_size();
 };
 
 #endif // SHARE_VM_OPTO_C2COMPILER_HPP
--- a/hotspot/src/share/vm/opto/compile.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -535,7 +535,7 @@
     if (scratch_buffer_blob() == NULL) {
       // Let CompilerBroker disable further compilations.
       record_failure("Not enough space for scratch buffer in CodeCache");
-      CompileBroker::handle_full_code_cache();
+      CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
       return;
     }
   }
--- a/hotspot/src/share/vm/opto/output.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/output.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -1166,7 +1166,7 @@
   // Have we run out of code space?
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     C->record_failure("CodeCache is full");
-    CompileBroker::handle_full_code_cache();
+    CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
     return NULL;
   }
   // Configure the code buffer.
@@ -1491,7 +1491,7 @@
       cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
       if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
         C->record_failure("CodeCache is full");
-        CompileBroker::handle_full_code_cache();
+        CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
         return;
       }
 
@@ -1648,7 +1648,7 @@
   // One last check for failed CodeBuffer::expand:
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     C->record_failure("CodeCache is full");
-    CompileBroker::handle_full_code_cache();
+    CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
     return;
   }
 
--- a/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -228,19 +228,17 @@
   // created nmethod will notify normally and nmethods which are freed
   // can be safely skipped.
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  nmethod* current = CodeCache::first_nmethod();
-  while (current != NULL) {
-    // Only notify for live nmethods
-    if (current->is_alive()) {
-      // Lock the nmethod so it can't be freed
-      nmethodLocker nml(current);
+  // Iterate over non-profiled and profiled nmethods
+  NMethodIterator iter;
+  while (iter.next_alive()) {
+    nmethod* current = iter.method();
+    // Lock the nmethod so it can't be freed
+    nmethodLocker nml(current);
 
-      // Don't hold the lock over the notify or jmethodID creation
-      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      current->get_and_cache_jmethod_id();
-      JvmtiExport::post_compiled_method_load(current);
-    }
-    current = CodeCache::next_nmethod(current);
+    // Don't hold the lock over the notify or jmethodID creation
+    MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    current->get_and_cache_jmethod_id();
+    JvmtiExport::post_compiled_method_load(current);
   }
   return JVMTI_ERROR_NONE;
 }
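
Several call sites above trade the old CodeCache::first_nmethod()/next_nmethod() pair for NMethodIterator, which must now walk every method heap in turn. A hedged sketch of such an iterator, assuming each heap exposes an intrusive singly-linked list; the next()/next_alive()/method()/end() interface mirrors the patch's call sites, while the internals are guesses:

    #include <cstdio>
    #include <vector>

    struct NMethod { NMethod* next; bool alive; int id; };
    struct Heap    { NMethod* first; };

    // Iterates all nmethods across all method heaps, in heap order.
    class NMethodIterator {
      const std::vector<Heap>* _heaps;
      size_t   _heap_index;
      NMethod* _current;
     public:
      explicit NMethodIterator(const std::vector<Heap>* heaps)
        : _heaps(heaps), _heap_index(0), _current(nullptr) {}

      // Advance to the next nmethod, crossing heap boundaries as needed.
      bool next() {
        while (_heap_index < _heaps->size()) {
          _current = (_current == nullptr) ? (*_heaps)[_heap_index].first
                                           : _current->next;
          if (_current != nullptr) return true;
          _heap_index++;  // this heap is exhausted, try the next one
        }
        return false;
      }
      // Advance, skipping dead nmethods.
      bool next_alive() {
        while (next()) { if (_current->alive) return true; }
        return false;
      }
      NMethod* method() const { return _current; }
      bool end() const { return _current == nullptr && _heap_index >= _heaps->size(); }
    };

    int main() {
      NMethod a{nullptr, true, 2}, b{&a, false, 1};
      Heap profiled{&b}, non_profiled{nullptr};
      std::vector<Heap> heaps = { profiled, non_profiled };
      NMethodIterator iter(&heaps);
      while (iter.next_alive()) printf("alive nmethod %d\n", iter.method()->id);
      return 0;
    }
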
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -215,7 +215,7 @@
   // The main intention is to keep enough free space for C2 compiled code
   // to achieve peak performance if the code cache is under stress.
   if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization))  {
-    double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
+    double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
     if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
       k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
     }
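
The only change above is which heap's pressure feeds the formula; the exponential back-off itself is untouched. A worked sketch with made-up numbers, assuming the default IncreaseFirstTierCompileThresholdAt=50, which (per the policy's initialization) works out to a trigger ratio of about 2:

    #include <cmath>
    #include <cstdio>

    int main() {
      // Illustrative numbers: a 100M non-profiled heap that is 95% full.
      double max_capacity = 100.0, unallocated = 5.0;
      double reverse_free_ratio = max_capacity / unallocated;        // = 20
      double increase_threshold_at_ratio = 2.0;  // assumed default trigger, ~50% full
      double k = 1.0;
      if (reverse_free_ratio > increase_threshold_at_ratio) {
        k *= exp(reverse_free_ratio - increase_threshold_at_ratio);  // e^18
      }
      printf("threshold scaling factor k = %g\n", k);                // ~6.6e7
      return 0;
    }

So as the relevant heap fills, tier-3 compile thresholds are scaled up sharply, effectively throttling new profiled compilations before the heap is exhausted.
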
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -1143,7 +1143,27 @@
   }
   // Increase the code cache size - tiered compiles a lot more.
   if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
-    FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 5);
+    FLAG_SET_ERGO(uintx, ReservedCodeCacheSize, ReservedCodeCacheSize * 5);
+  }
+  // Enable SegmentedCodeCache if TieredCompilation is enabled and ReservedCodeCacheSize >= 240M
+  if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M) {
+    FLAG_SET_ERGO(bool, SegmentedCodeCache, true);
+
+    // Multiply the method code heap sizes by 5 but keep NonMethodCodeHeapSize fixed;
+    // distribute its remaining share between the non-profiled and profiled code heaps
+    if (FLAG_IS_DEFAULT(ProfiledCodeHeapSize)) {
+      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, ProfiledCodeHeapSize * 5 + NonMethodCodeHeapSize * 2);
+    }
+    if (FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
+      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize * 5 + NonMethodCodeHeapSize * 2);
+    }
+    // Check consistency of code heap sizes
+    if ((NonMethodCodeHeapSize + NonProfiledCodeHeapSize + ProfiledCodeHeapSize) != ReservedCodeCacheSize) {
+      jio_fprintf(defaultStream::error_stream(),
+                  "Invalid code heap sizes: NonMethodCodeHeapSize(%dK) + ProfiledCodeHeapSize(%dK) + NonProfiledCodeHeapSize(%dK) = %dK. Must be equal to ReservedCodeCacheSize = %uK.\n",
+                  NonMethodCodeHeapSize/K, ProfiledCodeHeapSize/K, NonProfiledCodeHeapSize/K,
+                  (NonMethodCodeHeapSize + ProfiledCodeHeapSize + NonProfiledCodeHeapSize)/K, ReservedCodeCacheSize/K);
+      vm_exit(1);
+    }
   }
   if (!UseInterpreter) { // -Xcomp
     Tier3InvokeNotifyFreqLog = 0;
@@ -2442,6 +2462,18 @@
                 "Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
                 (2*G)/M);
     status = false;
+  } else if (NonMethodCodeHeapSize < min_code_cache_size) {
+    jio_fprintf(defaultStream::error_stream(),
+                "Invalid NonMethodCodeHeapSize=%dK. Must be at least %uK.\n", NonMethodCodeHeapSize/K,
+                min_code_cache_size/K);
+    status = false;
+  } else if ((!FLAG_IS_DEFAULT(NonMethodCodeHeapSize) || !FLAG_IS_DEFAULT(ProfiledCodeHeapSize) || !FLAG_IS_DEFAULT(NonProfiledCodeHeapSize))
+             && (NonMethodCodeHeapSize + NonProfiledCodeHeapSize + ProfiledCodeHeapSize) != ReservedCodeCacheSize) {
+    jio_fprintf(defaultStream::error_stream(),
+                "Invalid code heap sizes: NonMethodCodeHeapSize(%dK) + ProfiledCodeHeapSize(%dK) + NonProfiledCodeHeapSize(%dK) = %dK. Must be equal to ReservedCodeCacheSize = %uK.\n",
+                NonMethodCodeHeapSize/K, ProfiledCodeHeapSize/K, NonProfiledCodeHeapSize/K,
+                (NonMethodCodeHeapSize + ProfiledCodeHeapSize + NonProfiledCodeHeapSize)/K, ReservedCodeCacheSize/K);
+    status = false;
   }
 
   status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
@@ -2868,8 +2900,41 @@
         return JNI_EINVAL;
       }
       FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize);
+      // -XX:NonMethodCodeHeapSize=
+    } else if (match_option(option, "-XX:NonMethodCodeHeapSize=", &tail)) {
+      julong long_NonMethodCodeHeapSize = 0;
+
+      ArgsRange errcode = parse_memory_size(tail, &long_NonMethodCodeHeapSize, 1);
+      if (errcode != arg_in_range) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid maximum non-method code heap size: %s.\n", option->optionString);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, NonMethodCodeHeapSize, (uintx)long_NonMethodCodeHeapSize);
+      // -XX:ProfiledCodeHeapSize=
+    } else if (match_option(option, "-XX:ProfiledCodeHeapSize=", &tail)) {
+      julong long_ProfiledCodeHeapSize = 0;
+
+      ArgsRange errcode = parse_memory_size(tail, &long_ProfiledCodeHeapSize, 1);
+      if (errcode != arg_in_range) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid maximum profiled code heap size: %s.\n", option->optionString);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, ProfiledCodeHeapSize, (uintx)long_ProfiledCodeHeapSize);
+      // -XX:NonProfiledCodeHeapSize=
+    } else if (match_option(option, "-XX:NonProfiledCodeHeapSize=", &tail)) {
+      julong long_NonProfiledCodeHeapSize = 0;
+
+      ArgsRange errcode = parse_memory_size(tail, &long_NonProfiledCodeHeapSize, 1);
+      if (errcode != arg_in_range) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid maximum non-profiled code heap size: %s.\n", option->optionString);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, NonProfiledCodeHeapSize, (uintx)long_NonProfiledCodeHeapSize);
       //-XX:IncreaseFirstTierCompileThresholdAt=
-      } else if (match_option(option, "-XX:IncreaseFirstTierCompileThresholdAt=", &tail)) {
+    } else if (match_option(option, "-XX:IncreaseFirstTierCompileThresholdAt=", &tail)) {
         uintx uint_IncreaseFirstTierCompileThresholdAt = 0;
         if (!parse_uintx(tail, &uint_IncreaseFirstTierCompileThresholdAt, 0) || uint_IncreaseFirstTierCompileThresholdAt > 99) {
           jio_fprintf(defaultStream::error_stream(),
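
The ergonomic sizing above preserves the invariant that the three heap sizes sum exactly to ReservedCodeCacheSize: the two method heaps are scaled by 5 while the non-method heap stays fixed, and the non-method heap's forgone 4x share is split evenly between the method heaps, since (5p + 2n) + (5q + 2n) + n = 5(p + q + n). A small sketch checking that arithmetic with hypothetical pre-tiered defaults (the numbers are made up, only the invariant matters):

    #include <cassert>
    #include <cstdio>

    int main() {
      // Hypothetical pre-tiered defaults (in MB) with the required invariant:
      // non_method + profiled + non_profiled == reserved.
      unsigned reserved = 48, non_method = 8, profiled = 20, non_profiled = 20;
      assert(non_method + profiled + non_profiled == reserved);

      // Tiered ergonomics from the patch: reserved grows 5x, the non-method
      // heap stays fixed, and its "missing" 4x share (4n) is split 2n/2n
      // between the profiled and non-profiled heaps.
      reserved     *= 5;
      profiled      = profiled * 5 + non_method * 2;
      non_profiled  = non_profiled * 5 + non_method * 2;

      // 5p+2n + 5q+2n + n == 5(p+q+n), so the invariant still holds.
      assert(non_method + profiled + non_profiled == reserved);
      printf("%uM = %uM + %uM + %uM\n", reserved, non_method, profiled, non_profiled);
      return 0;
    }
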
--- a/hotspot/src/share/vm/runtime/fprofiler.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/fprofiler.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -165,7 +165,7 @@
   for (int index = 0; index < s; index++) {
     counters[index] = 0;
   }
-  base = CodeCache::first_address();
+  base = CodeCache::low_bound();
 }
 
 void PCRecorder::record(address pc) {
--- a/hotspot/src/share/vm/runtime/globals.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -186,6 +186,10 @@
 define_pd_global(intx, InlineUnsafeOps,              true);
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
 define_pd_global(intx, ReservedCodeCacheSize,        32*M);
+define_pd_global(intx, NonProfiledCodeHeapSize,      0);
+define_pd_global(intx, ProfiledCodeHeapSize,         0);
+define_pd_global(intx, NonMethodCodeHeapSize,        32*M);
+
 define_pd_global(intx, CodeCacheExpansionSize,       32*K);
 define_pd_global(intx, CodeCacheMinBlockLength,      1);
 define_pd_global(intx, CodeCacheMinimumUseSpace,     200*K);
@@ -3354,9 +3358,21 @@
   develop_pd(uintx, CodeCacheMinimumUseSpace,                               \
           "Minimum code cache size (in bytes) required to start VM.")       \
                                                                             \
+  product(bool, SegmentedCodeCache, false,                                  \
+          "Use a segmented code cache")                                     \
+                                                                            \
   product_pd(uintx, ReservedCodeCacheSize,                                  \
           "Reserved code cache size (in bytes) - maximum code cache size")  \
                                                                             \
+  product_pd(uintx, NonProfiledCodeHeapSize,                                \
+          "Size of code heap with non-profiled methods (in bytes)")         \
+                                                                            \
+  product_pd(uintx, ProfiledCodeHeapSize,                                   \
+          "Size of code heap with profiled methods (in bytes)")             \
+                                                                            \
+  product_pd(uintx, NonMethodCodeHeapSize,                                  \
+          "Size of code heap with non-methods (in bytes)")                  \
+                                                                            \
   product(uintx, CodeCacheMinimumFreeSpace, 500*K,                          \
           "When less than X space left, we stop compiling")                 \
                                                                             \
--- a/hotspot/src/share/vm/runtime/init.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/init.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -49,6 +49,7 @@
 void management_init();
 void bytecodes_init();
 void classLoader_init();
+void compilationPolicy_init();
 void codeCache_init();
 void VM_Version_init();
 void os_init_globals();        // depends on VM_Version_init, before universe_init
@@ -68,7 +69,6 @@
 void vtableStubs_init();
 void InlineCacheBuffer_init();
 void compilerOracle_init();
-void compilationPolicy_init();
 void compileBroker_init();
 
 // Initialization after compiler initialization
@@ -97,6 +97,7 @@
   management_init();
   bytecodes_init();
   classLoader_init();
+  compilationPolicy_init();
   codeCache_init();
   VM_Version_init();
   os_init_globals();
@@ -123,7 +124,6 @@
   vtableStubs_init();
   InlineCacheBuffer_init();
   compilerOracle_init();
-  compilationPolicy_init();
   compileBroker_init();
   VMRegImpl::set_regName();
 
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -2422,7 +2422,7 @@
       // Ought to log this but compile log is only per compile thread
       // and we're some non descript Java thread.
       MutexUnlocker mu(AdapterHandlerLibrary_lock);
-      CompileBroker::handle_full_code_cache();
+      CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
       return NULL; // Out of CodeCache space
     }
     entry->relocate(new_adapter->content_begin());
@@ -2596,7 +2596,7 @@
     nm->post_compiled_method_load_event();
   } else {
     // CodeCache is full, disable compilation
-    CompileBroker::handle_full_code_cache();
+    CompileBroker::handle_full_code_cache(CodeBlobType::MethodNonProfiled);
   }
 }
 
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -131,7 +131,7 @@
 #define SWEEP(nm)
 #endif
 
-nmethod* NMethodSweeper::_current                      = NULL; // Current nmethod
+NMethodIterator NMethodSweeper::_current;                      // Current nmethod
 long     NMethodSweeper::_traversals                   = 0;    // Stack scan count, also sweep ID.
 long     NMethodSweeper::_total_nof_code_cache_sweeps  = 0;    // Total number of full sweeps of the code cache
 long     NMethodSweeper::_time_counter                 = 0;    // Virtual time used to periodically invoke sweeper
@@ -150,26 +150,24 @@
                                                                //   3) zombie      -> marked_for_reclamation
 int    NMethodSweeper::_hotness_counter_reset_val       = 0;
 
-long   NMethodSweeper::_total_nof_methods_reclaimed     = 0;    // Accumulated nof methods flushed
-long   NMethodSweeper::_total_nof_c2_methods_reclaimed  = 0;    // Accumulated nof methods flushed
-size_t NMethodSweeper::_total_flushed_size              = 0;    // Total number of bytes flushed from the code cache
-Tickspan  NMethodSweeper::_total_time_sweeping;                 // Accumulated time sweeping
-Tickspan  NMethodSweeper::_total_time_this_sweep;               // Total time this sweep
-Tickspan  NMethodSweeper::_peak_sweep_time;                     // Peak time for a full sweep
-Tickspan  NMethodSweeper::_peak_sweep_fraction_time;            // Peak time sweeping one fraction
-
+long   NMethodSweeper::_total_nof_methods_reclaimed     = 0;   // Accumulated nof methods flushed
+long   NMethodSweeper::_total_nof_c2_methods_reclaimed  = 0;   // Accumulated nof methods flushed
+size_t NMethodSweeper::_total_flushed_size              = 0;   // Total number of bytes flushed from the code cache
+Tickspan NMethodSweeper::_total_time_sweeping;                 // Accumulated time sweeping
+Tickspan NMethodSweeper::_total_time_this_sweep;               // Total time this sweep
+Tickspan NMethodSweeper::_peak_sweep_time;                     // Peak time for a full sweep
+Tickspan NMethodSweeper::_peak_sweep_fraction_time;            // Peak time sweeping one fraction
 
 
 class MarkActivationClosure: public CodeBlobClosure {
 public:
   virtual void do_code_blob(CodeBlob* cb) {
-    if (cb->is_nmethod()) {
-      nmethod* nm = (nmethod*)cb;
-      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
-      // If we see an activation belonging to a non_entrant nmethod, we mark it.
-      if (nm->is_not_entrant()) {
-        nm->mark_as_seen_on_stack();
-      }
+    assert(cb->is_nmethod(), "CodeBlob should be nmethod");
+    nmethod* nm = (nmethod*)cb;
+    nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
+    // If we see an activation belonging to a non_entrant nmethod, we mark it.
+    if (nm->is_not_entrant()) {
+      nm->mark_as_seen_on_stack();
     }
   }
 };
@@ -178,10 +176,9 @@
 class SetHotnessClosure: public CodeBlobClosure {
 public:
   virtual void do_code_blob(CodeBlob* cb) {
-    if (cb->is_nmethod()) {
-      nmethod* nm = (nmethod*)cb;
-      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
-    }
+    assert(cb->is_nmethod(), "CodeBlob should be nmethod");
+    nmethod* nm = (nmethod*)cb;
+    nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
   }
 };
 static SetHotnessClosure set_hotness_closure;
@@ -194,7 +191,7 @@
   return _hotness_counter_reset_val;
 }
 bool NMethodSweeper::sweep_in_progress() {
-  return (_current != NULL);
+  return !_current.end();
 }
 
 // Scans the stacks of all Java threads and marks activations of not-entrant methods.
@@ -212,11 +209,13 @@
   _time_counter++;
 
   // Check for restart
-  assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
+  assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
   if (!sweep_in_progress()) {
     _seen = 0;
     _sweep_fractions_left = NmethodSweepFraction;
-    _current = CodeCache::first_nmethod();
+    _current = NMethodIterator();
+    // Initialize to first nmethod
+    _current.next();
     _traversals += 1;
     _total_time_this_sweep = Tickspan();
 
@@ -271,7 +270,9 @@
     // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
     // value) that disables the intended periodic sweeps.
     const int max_wait_time = ReservedCodeCacheSize / (16 * M);
-    double wait_until_next_sweep = max_wait_time - time_since_last_sweep - CodeCache::reverse_free_ratio();
+    double wait_until_next_sweep = max_wait_time - time_since_last_sweep -
+        MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled),
+             CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled));
     assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
 
     if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
@@ -353,7 +354,7 @@
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
     // The last invocation iterates until there are no more nmethods
-    for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
+    while ((swept_count < todo || _sweep_fractions_left == 1) && !_current.end()) {
       swept_count++;
       if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
         if (PrintMethodFlushing && Verbose) {
@@ -369,19 +370,19 @@
       // Since we will give up the CodeCache_lock, always skip ahead
       // to the next nmethod.  Other blobs can be deleted by other
       // threads but nmethods are only reclaimed by the sweeper.
-      nmethod* next = CodeCache::next_nmethod(_current);
+      nmethod* nm = _current.method();
+      _current.next();
 
       // Now ready to process nmethod and give up CodeCache_lock
       {
         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-        freed_memory += process_nmethod(_current);
+        freed_memory += process_nmethod(nm);
       }
       _seen++;
-      _current = next;
     }
   }
 
-  assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");
+  assert(_sweep_fractions_left > 1 || _current.end(), "must have scanned the whole cache");
 
   const Ticks sweep_end_counter = Ticks::now();
   const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
@@ -594,7 +595,8 @@
       // ReservedCodeCacheSize
       int reset_val = hotness_counter_reset_val();
       int time_since_reset = reset_val - nm->hotness_counter();
-      double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
+      int code_blob_type = CodeCache::get_code_blob_type(nm->comp_level());
+      double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
       // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
       // I.e., 'threshold' increases with lower available space in the code cache and a higher
       // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
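
process_nmethod now scales its flush threshold by the occupancy of the heap the method actually lives in, so pressure in the profiled heap no longer evicts methods from a half-empty non-profiled heap, and vice versa. A worked sketch with made-up numbers showing how the threshold moves with reverse_free_ratio:

    #include <cstdio>

    // Illustrates how the sweeper's flush threshold (sweeper.cpp above) rises
    // with the pressure of the method's own code heap. Numbers are made up.
    int main() {
      int reset_val = 500;       // stand-in for hotness_counter_reset_val()
      int sweep_activity = 10;   // stand-in for NmethodSweepActivity

      // reverse_free_ratio grows as the heap fills: ~2 at half full, ~20 at 95%.
      double ratios[] = { 2.0, 20.0, 50.0 };
      for (double ratio : ratios) {
        double threshold = -reset_val + ratio * sweep_activity;
        printf("reverse_free_ratio %5.1f -> threshold %7.1f\n", ratio, threshold);
      }
      // A method becomes a flush candidate once (reset_val - hotness_counter)
      // exceeds the threshold, so a fuller heap flushes colder methods sooner.
      return 0;
    }
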
--- a/hotspot/src/share/vm/runtime/sweeper.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -54,33 +54,33 @@
 //     is full.
 
 class NMethodSweeper : public AllStatic {
-  static long      _traversals;                     // Stack scan count, also sweep ID.
-  static long      _total_nof_code_cache_sweeps;    // Total number of full sweeps of the code cache
-  static long      _time_counter;                   // Virtual time used to periodically invoke sweeper
-  static long      _last_sweep;                     // Value of _time_counter when the last sweep happened
-  static nmethod*  _current;                        // Current nmethod
-  static int       _seen;                           // Nof. nmethod we have currently processed in current pass of CodeCache
-  static int       _flushed_count;                  // Nof. nmethods flushed in current sweep
-  static int       _zombified_count;                // Nof. nmethods made zombie in current sweep
-  static int       _marked_for_reclamation_count;   // Nof. nmethods marked for reclaim in current sweep
+  static long      _traversals;                   // Stack scan count, also sweep ID.
+  static long      _total_nof_code_cache_sweeps;  // Total number of full sweeps of the code cache
+  static long      _time_counter;                 // Virtual time used to periodically invoke sweeper
+  static long      _last_sweep;                   // Value of _time_counter when the last sweep happened
+  static NMethodIterator _current;                // Current nmethod
+  static int       _seen;                         // Nof. nmethod we have currently processed in current pass of CodeCache
+  static int       _flushed_count;                // Nof. nmethods flushed in current sweep
+  static int       _zombified_count;              // Nof. nmethods made zombie in current sweep
+  static int       _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep
 
-  static volatile int  _sweep_fractions_left;       // Nof. invocations left until we are completed with this pass
-  static volatile int  _sweep_started;              // Flag to control conc sweeper
-  static volatile bool _should_sweep;               // Indicates if we should invoke the sweeper
-  static volatile int  _bytes_changed;              // Counts the total nmethod size if the nmethod changed from:
-                                                    //   1) alive       -> not_entrant
-                                                    //   2) not_entrant -> zombie
-                                                    //   3) zombie      -> marked_for_reclamation
+  static volatile int  _sweep_fractions_left;     // Nof. invocations left until we are completed with this pass
+  static volatile int  _sweep_started;            // Flag to control conc sweeper
+  static volatile bool _should_sweep;             // Indicates if we should invoke the sweeper
+  static volatile int  _bytes_changed;            // Counts the total nmethod size if the nmethod changed from:
+                                                  //   1) alive       -> not_entrant
+                                                  //   2) not_entrant -> zombie
+                                                  //   3) zombie      -> marked_for_reclamation
   // Stat counters
   static long      _total_nof_methods_reclaimed;    // Accumulated nof methods flushed
   static long      _total_nof_c2_methods_reclaimed; // Accumulated nof C2-compiled methods flushed
   static size_t    _total_flushed_size;             // Total size of flushed methods
   static int       _hotness_counter_reset_val;
 
-  static Tickspan  _total_time_sweeping;            // Accumulated time sweeping
-  static Tickspan  _total_time_this_sweep;          // Total time this sweep
-  static Tickspan  _peak_sweep_time;                // Peak time for a full sweep
-  static Tickspan  _peak_sweep_fraction_time;       // Peak time sweeping one fraction
+  static Tickspan  _total_time_sweeping;          // Accumulated time sweeping
+  static Tickspan  _total_time_this_sweep;        // Total time this sweep
+  static Tickspan  _peak_sweep_time;              // Peak time for a full sweep
+  static Tickspan  _peak_sweep_fraction_time;     // Peak time sweeping one fraction
 
   static int  process_nmethod(nmethod *nm);
   static void release_nmethod(nmethod* nm);
@@ -98,7 +98,7 @@
 
 
 #ifdef ASSERT
-  static bool is_sweeping(nmethod* which) { return _current == which; }
+  static bool is_sweeping(nmethod* which) { return _current.method() == which; }
   // Keep track of sweeper activity in the ring buffer
   static void record_sweep(nmethod* nm, int line);
   static void report_events(int id, address entry);
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -765,8 +765,8 @@
   /* CodeCache (NOTE: incomplete) */                                                                                                 \
   /********************************/                                                                                                 \
                                                                                                                                      \
-     static_field(CodeCache,                   _heap,                                         CodeHeap*)                             \
-     static_field(CodeCache,                   _scavenge_root_nmethods,                       nmethod*)                              \
+  static_field(CodeCache,                      _heaps,                                        GrowableArray<CodeHeap*>*)             \
+  static_field(CodeCache,                      _scavenge_root_nmethods,                       nmethod*)                              \
                                                                                                                                      \
   /*******************************/                                                                                                  \
   /* CodeHeap (NOTE: incomplete) */                                                                                                  \
--- a/hotspot/src/share/vm/services/memoryService.cpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/services/memoryService.cpp	Wed Sep 17 08:00:07 2014 +0200
@@ -63,7 +63,9 @@
 
 GCMemoryManager* MemoryService::_minor_gc_manager      = NULL;
 GCMemoryManager* MemoryService::_major_gc_manager      = NULL;
-MemoryPool*      MemoryService::_code_heap_pool        = NULL;
+MemoryManager*   MemoryService::_code_cache_manager    = NULL;
+GrowableArray<MemoryPool*>* MemoryService::_code_heap_pools =
+    new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryPool*>(init_code_heap_pools_size, true);
 MemoryPool*      MemoryService::_metaspace_pool        = NULL;
 MemoryPool*      MemoryService::_compressed_class_pool = NULL;
 
@@ -388,15 +390,21 @@
 }
 #endif // INCLUDE_ALL_GCS
 
-void MemoryService::add_code_heap_memory_pool(CodeHeap* heap) {
-  _code_heap_pool = new CodeHeapPool(heap,
-                                     "Code Cache",
-                                     true /* support_usage_threshold */);
-  MemoryManager* mgr = MemoryManager::get_code_cache_memory_manager();
-  mgr->add_pool(_code_heap_pool);
+void MemoryService::add_code_heap_memory_pool(CodeHeap* heap, const char* name) {
+  // Create new memory pool for this heap
+  MemoryPool* code_heap_pool = new CodeHeapPool(heap, name, true /* support_usage_threshold */);
+
+  // Append to lists
+  _code_heap_pools->append(code_heap_pool);
+  _pools_list->append(code_heap_pool);
 
-  _pools_list->append(_code_heap_pool);
-  _managers_list->append(mgr);
+  if (_code_cache_manager == NULL) {
+    // Create CodeCache memory manager
+    _code_cache_manager = MemoryManager::get_code_cache_memory_manager();
+    _managers_list->append(_code_cache_manager);
+  }
+
+  _code_cache_manager->add_pool(code_heap_pool);
 }
 
 void MemoryService::add_metaspace_memory_pools() {
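
On the management side, the single "Code Cache" pool becomes one pool per code heap, all attached to a single lazily created code cache manager, so JMX clients see the per-heap names used by the test below. A hedged sketch of the registration pattern, using standard containers in place of GrowableArray; the types are illustrative:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct MemoryPool    { std::string name; };
    struct MemoryManager { std::vector<MemoryPool*> pools; };

    static MemoryManager* code_cache_manager = nullptr;  // lazily created
    static std::vector<MemoryPool*> code_heap_pools;

    // Mirrors MemoryService::add_code_heap_memory_pool(): one pool per heap,
    // all owned by a single lazily created CodeCache manager.
    void add_code_heap_memory_pool(const std::string& heap_name) {
      MemoryPool* pool = new MemoryPool{heap_name};
      code_heap_pools.push_back(pool);
      if (code_cache_manager == nullptr) {
        code_cache_manager = new MemoryManager();  // first heap creates it
      }
      code_cache_manager->pools.push_back(pool);
    }

    int main() {
      add_code_heap_memory_pool("CodeHeap 'non-methods'");
      add_code_heap_memory_pool("CodeHeap 'profiled nmethods'");
      add_code_heap_memory_pool("CodeHeap 'non-profiled nmethods'");
      for (MemoryPool* p : code_cache_manager->pools) printf("%s\n", p->name.c_str());
      return 0;
    }
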
--- a/hotspot/src/share/vm/services/memoryService.hpp	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/services/memoryService.hpp	Wed Sep 17 08:00:07 2014 +0200
@@ -53,7 +53,8 @@
 private:
   enum {
     init_pools_list_size = 10,
-    init_managers_list_size = 5
+    init_managers_list_size = 5,
+    init_code_heap_pools_size = 9
   };
 
   // index for minor and major generations
@@ -70,8 +71,9 @@
   static GCMemoryManager*               _major_gc_manager;
   static GCMemoryManager*               _minor_gc_manager;
 
-  // Code heap memory pool
-  static MemoryPool*                    _code_heap_pool;
+  // Memory manager and code heap pools for the CodeCache
+  static MemoryManager*                 _code_cache_manager;
+  static GrowableArray<MemoryPool*>*    _code_heap_pools;
 
   static MemoryPool*                    _metaspace_pool;
   static MemoryPool*                    _compressed_class_pool;
@@ -123,7 +125,7 @@
 
 public:
   static void set_universe_heap(CollectedHeap* heap);
-  static void add_code_heap_memory_pool(CodeHeap* heap);
+  static void add_code_heap_memory_pool(CodeHeap* heap, const char* name);
   static void add_metaspace_memory_pools();
 
   static MemoryPool*    get_memory_pool(instanceHandle pool);
@@ -146,7 +148,10 @@
 
   static void track_memory_usage();
   static void track_code_cache_memory_usage() {
-    track_memory_pool_usage(_code_heap_pool);
+    // Track memory pool usage of all CodeCache memory pools
+    for (int i = 0; i < _code_heap_pools->length(); ++i) {
+      track_memory_pool_usage(_code_heap_pools->at(i));
+    }
   }
   static void track_metaspace_memory_usage() {
     track_memory_pool_usage(_metaspace_pool);
--- a/hotspot/src/share/vm/trace/trace.xml	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/trace/trace.xml	Wed Sep 17 08:00:07 2014 +0200
@@ -394,6 +394,7 @@
 
     <event id="CodeCacheFull" path="vm/code_cache/full" label="Code Cache Full"
          has_thread="true" is_requestable="false" is_constant="false" is_instant="true">
+      <value type="CODEBLOBTYPE" field="codeBlobType" label="Code Heap"/>
       <value type="ADDRESS" field="startAddress" label="Start Address"/>
       <value type="ADDRESS" field="commitedTopAddress" label="Commited Top"/>
       <value type="ADDRESS" field="reservedTopAddress" label="Reserved Top"/>
--- a/hotspot/src/share/vm/trace/tracetypes.xml	Tue Sep 16 14:39:11 2014 +0200
+++ b/hotspot/src/share/vm/trace/tracetypes.xml	Wed Sep 17 08:00:07 2014 +0200
@@ -170,6 +170,11 @@
                   type="U1" jvm_type="FLAGVALUEORIGIN">
       <value type="UTF8" field="origin" label="origin" />
     </content_type>
+
+    <content_type id="CodeBlobType" hr_name="Code Blob Type"
+                  type="U1" jvm_type="CODEBLOBTYPE">
+      <value type="UTF8" field="type" label="type" />
+    </content_type>
 
   </content_types>
 
@@ -371,6 +376,10 @@
     <!-- FLAGVALUEORIGIN -->
     <primary_type symbol="FLAGVALUEORIGIN" datatype="U1"
                   contenttype="FLAGVALUEORIGIN" type="u1" sizeop="sizeof(u1)" />
+
+    <!-- CODEBLOBTYPE -->
+    <primary_type symbol="CODEBLOBTYPE" datatype="U1"
+                  contenttype="CODEBLOBTYPE" type="u1" sizeop="sizeof(u1)" />
 
   </primary_types>
 </types>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/codecache/CheckSegmentedCodeCache.java	Wed Sep 17 08:00:07 2014 +0200
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.*;
+
+/*
+ * @test CheckSegmentedCodeCache
+ * @bug 8015774
+ * @summary Checks VM options related to the segmented code cache
+ * @library /testlibrary
+ * @run main/othervm CheckSegmentedCodeCache
+ */
+public class CheckSegmentedCodeCache {
+  // Code heap names
+  private static final String NON_METHOD = "CodeHeap 'non-methods'";
+  private static final String PROFILED = "CodeHeap 'profiled nmethods'";
+  private static final String NON_PROFILED = "CodeHeap 'non-profiled nmethods'";
+
+  private static void verifySegmentedCodeCache(ProcessBuilder pb, boolean enabled) throws Exception {
+    OutputAnalyzer out = new OutputAnalyzer(pb.start());
+    if (enabled) {
+      try {
+        // Non-method code heap should be always available with the segmented code cache
+        out.shouldContain(NON_METHOD);
+      } catch (RuntimeException e) {
+        // TieredCompilation is disabled in a client VM
+        out.shouldContain("TieredCompilation is disabled in this release.");
+      }
+    } else {
+      out.shouldNotContain(NON_METHOD);
+    }
+    out.shouldHaveExitValue(0);
+  }
+
+  private static void verifyCodeHeapNotExists(ProcessBuilder pb, String... heapNames) throws Exception {
+    OutputAnalyzer out = new OutputAnalyzer(pb.start());
+    for (String name : heapNames) {
+      out.shouldNotContain(name);
+    }
+  }
+
+  private static void failsWith(ProcessBuilder pb, String message) throws Exception {
+    OutputAnalyzer out = new OutputAnalyzer(pb.start());
+    out.shouldContain(message);
+    out.shouldHaveExitValue(1);
+  }
+
+  /**
+   * Check the result of segmented code cache related VM options.
+   */
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb;
+
+    // Disabled with ReservedCodeCacheSize < 240MB
+    pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=239m",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifySegmentedCodeCache(pb, false);
+
+    // Disabled without TieredCompilation
+    pb = ProcessTools.createJavaProcessBuilder("-XX:-TieredCompilation",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifySegmentedCodeCache(pb, false);
+
+    // Enabled with TieredCompilation and ReservedCodeCacheSize >= 240MB
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+TieredCompilation",
+                                               "-XX:ReservedCodeCacheSize=240m",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifySegmentedCodeCache(pb, true);
+
+    // Always enabled if SegmentedCodeCache is set
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
+                                               "-XX:-TieredCompilation",
+                                               "-XX:ReservedCodeCacheSize=239m",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifySegmentedCodeCache(pb, true);
+
+    // The profiled and non-profiled code heaps should not be available in
+    // interpreter-only mode
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
+                                               "-Xint",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifyCodeHeapNotExists(pb, PROFILED, NON_PROFILED);
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
+                                               "-XX:TieredStopAtLevel=0",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifyCodeHeapNotExists(pb, PROFILED, NON_PROFILED);
+
+    // The profiled code heap should not be available if we stop compilation at CompLevel_simple
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
+                                               "-XX:TieredStopAtLevel=1",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifyCodeHeapNotExists(pb, PROFILED);
+
+    // Fails with too small non-method code heap size
+    pb = ProcessTools.createJavaProcessBuilder("-XX:NonMethodCodeHeapSize=100K");
+    failsWith(pb, "Invalid NonMethodCodeHeapSize");
+
+    // Fails if code heap sizes do not add up
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
+                                               "-XX:ReservedCodeCacheSize=10M",
+                                               "-XX:NonMethodCodeHeapSize=5M",
+                                               "-XX:ProfiledCodeHeapSize=5M",
+                                               "-XX:NonProfiledCodeHeapSize=5M");
+    failsWith(pb, "Invalid code heap sizes");
+
+    // Fails if not enough space for VM internal code
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
+                                               "-XX:ReservedCodeCacheSize=1700K",
+                                               "-XX:InitialCodeCacheSize=100K");
+    failsWith(pb, "Not enough space in non-method code heap to run VM");
+  }
+}
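
For manual experiments outside of jtreg, the effect of these flags can be observed directly with -XX:+PrintCodeCache; assuming a VM built from this changeset, an invocation like

    java -XX:+TieredCompilation -XX:ReservedCodeCacheSize=240m -XX:+PrintCodeCache -version

should list the three code heaps ('non-methods', 'profiled nmethods', 'non-profiled nmethods') instead of a single CodeCache line. The exact PrintCodeCache output format is an assumption here, inferred from the heap names the test matches on.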