6995781: Native Memory Tracking (Phase 1)
author zgu
Thu, 28 Jun 2012 17:03:16 -0400
changeset 13195 be27e1b6a4b9
parent 13099 64752e56d721
child 13196 6b399731153b
6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain
hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/BasicHashtable.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/BasicHashtableEntry.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableBucket.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableEntry.java
hotspot/make/bsd/makefiles/jvmg.make
hotspot/make/linux/makefiles/jvmg.make
hotspot/make/solaris/makefiles/jvmg.make
hotspot/make/windows/makefiles/debug.make
hotspot/src/os/bsd/vm/os_bsd.cpp
hotspot/src/os/bsd/vm/os_bsd.hpp
hotspot/src/os/bsd/vm/os_bsd.inline.hpp
hotspot/src/os/bsd/vm/perfMemory_bsd.cpp
hotspot/src/os/linux/vm/os_linux.cpp
hotspot/src/os/linux/vm/os_linux.hpp
hotspot/src/os/linux/vm/os_linux.inline.hpp
hotspot/src/os/linux/vm/perfMemory_linux.cpp
hotspot/src/os/posix/vm/os_posix.cpp
hotspot/src/os/solaris/dtrace/hs_private.d
hotspot/src/os/solaris/vm/os_solaris.cpp
hotspot/src/os/solaris/vm/os_solaris.hpp
hotspot/src/os/solaris/vm/os_solaris.inline.hpp
hotspot/src/os/solaris/vm/perfMemory_solaris.cpp
hotspot/src/os/windows/vm/os_windows.cpp
hotspot/src/os/windows/vm/os_windows.hpp
hotspot/src/os/windows/vm/perfMemory_windows.cpp
hotspot/src/share/vm/asm/codeBuffer.cpp
hotspot/src/share/vm/c1/c1_CFGPrinter.cpp
hotspot/src/share/vm/c1/c1_Compiler.cpp
hotspot/src/share/vm/c1/c1_LinearScan.cpp
hotspot/src/share/vm/ci/ciObjectFactory.cpp
hotspot/src/share/vm/classfile/classFileParser.cpp
hotspot/src/share/vm/classfile/classLoader.cpp
hotspot/src/share/vm/classfile/classLoader.hpp
hotspot/src/share/vm/classfile/dictionary.cpp
hotspot/src/share/vm/classfile/dictionary.hpp
hotspot/src/share/vm/classfile/javaAssertions.cpp
hotspot/src/share/vm/classfile/javaAssertions.hpp
hotspot/src/share/vm/classfile/loaderConstraints.cpp
hotspot/src/share/vm/classfile/loaderConstraints.hpp
hotspot/src/share/vm/classfile/placeholders.cpp
hotspot/src/share/vm/classfile/placeholders.hpp
hotspot/src/share/vm/classfile/resolutionErrors.cpp
hotspot/src/share/vm/classfile/resolutionErrors.hpp
hotspot/src/share/vm/classfile/symbolTable.cpp
hotspot/src/share/vm/classfile/symbolTable.hpp
hotspot/src/share/vm/classfile/systemDictionary.cpp
hotspot/src/share/vm/classfile/systemDictionary.hpp
hotspot/src/share/vm/code/codeBlob.cpp
hotspot/src/share/vm/code/codeCache.cpp
hotspot/src/share/vm/code/codeCache.hpp
hotspot/src/share/vm/code/nmethod.hpp
hotspot/src/share/vm/code/stubs.hpp
hotspot/src/share/vm/compiler/abstractCompiler.hpp
hotspot/src/share/vm/compiler/compileBroker.cpp
hotspot/src/share/vm/compiler/compileBroker.hpp
hotspot/src/share/vm/compiler/compileLog.cpp
hotspot/src/share/vm/compiler/compilerOracle.cpp
hotspot/src/share/vm/compiler/oopMap.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp
hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp
hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp
hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp
hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp
hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp
hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp
hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp
hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp
hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp
hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp
hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp
hotspot/src/share/vm/gc_implementation/g1/survRateGroup.cpp
hotspot/src/share/vm/gc_implementation/g1/survRateGroup.hpp
hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp
hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp
hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp
hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp
hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.cpp
hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.hpp
hotspot/src/share/vm/gc_implementation/shared/collectorCounters.cpp
hotspot/src/share/vm/gc_implementation/shared/collectorCounters.hpp
hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp
hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp
hotspot/src/share/vm/gc_implementation/shared/gcPolicyCounters.hpp
hotspot/src/share/vm/gc_implementation/shared/gcStats.hpp
hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp
hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp
hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp
hotspot/src/share/vm/gc_implementation/shared/hSpaceCounters.cpp
hotspot/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp
hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp
hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp
hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp
hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp
hotspot/src/share/vm/gc_implementation/shared/spaceCounters.cpp
hotspot/src/share/vm/gc_implementation/shared/spaceCounters.hpp
hotspot/src/share/vm/gc_implementation/shared/spaceDecorator.hpp
hotspot/src/share/vm/gc_interface/collectedHeap.hpp
hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
hotspot/src/share/vm/interpreter/oopMapCache.cpp
hotspot/src/share/vm/interpreter/oopMapCache.hpp
hotspot/src/share/vm/libadt/set.cpp
hotspot/src/share/vm/libadt/vectset.cpp
hotspot/src/share/vm/memory/allocation.cpp
hotspot/src/share/vm/memory/allocation.hpp
hotspot/src/share/vm/memory/allocation.inline.hpp
hotspot/src/share/vm/memory/barrierSet.hpp
hotspot/src/share/vm/memory/blockOffsetTable.cpp
hotspot/src/share/vm/memory/blockOffsetTable.hpp
hotspot/src/share/vm/memory/cardTableModRefBS.cpp
hotspot/src/share/vm/memory/collectorPolicy.hpp
hotspot/src/share/vm/memory/defNewGeneration.hpp
hotspot/src/share/vm/memory/filemap.hpp
hotspot/src/share/vm/memory/freeBlockDictionary.hpp
hotspot/src/share/vm/memory/genMarkSweep.cpp
hotspot/src/share/vm/memory/genOopClosures.hpp
hotspot/src/share/vm/memory/genRemSet.hpp
hotspot/src/share/vm/memory/generation.hpp
hotspot/src/share/vm/memory/generationSpec.hpp
hotspot/src/share/vm/memory/heap.cpp
hotspot/src/share/vm/memory/heap.hpp
hotspot/src/share/vm/memory/heapInspection.cpp
hotspot/src/share/vm/memory/heapInspection.hpp
hotspot/src/share/vm/memory/memRegion.hpp
hotspot/src/share/vm/memory/permGen.hpp
hotspot/src/share/vm/memory/referencePolicy.hpp
hotspot/src/share/vm/memory/referenceProcessor.cpp
hotspot/src/share/vm/memory/referenceProcessor.hpp
hotspot/src/share/vm/memory/resourceArea.hpp
hotspot/src/share/vm/memory/restore.cpp
hotspot/src/share/vm/memory/space.hpp
hotspot/src/share/vm/memory/tenuredGeneration.cpp
hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp
hotspot/src/share/vm/memory/universe.cpp
hotspot/src/share/vm/memory/universe.hpp
hotspot/src/share/vm/oops/constantPoolOop.hpp
hotspot/src/share/vm/oops/instanceKlass.cpp
hotspot/src/share/vm/oops/instanceKlass.hpp
hotspot/src/share/vm/oops/methodOop.hpp
hotspot/src/share/vm/oops/symbol.cpp
hotspot/src/share/vm/opto/idealGraphPrinter.cpp
hotspot/src/share/vm/opto/macro.cpp
hotspot/src/share/vm/opto/runtime.hpp
hotspot/src/share/vm/opto/type.cpp
hotspot/src/share/vm/prims/jni.cpp
hotspot/src/share/vm/prims/jniCheck.cpp
hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp
hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp
hotspot/src/share/vm/prims/jvmtiEnv.cpp
hotspot/src/share/vm/prims/jvmtiEnvBase.cpp
hotspot/src/share/vm/prims/jvmtiEnvBase.hpp
hotspot/src/share/vm/prims/jvmtiEnvThreadState.cpp
hotspot/src/share/vm/prims/jvmtiEnvThreadState.hpp
hotspot/src/share/vm/prims/jvmtiExport.cpp
hotspot/src/share/vm/prims/jvmtiExport.hpp
hotspot/src/share/vm/prims/jvmtiExtensions.cpp
hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.cpp
hotspot/src/share/vm/prims/jvmtiImpl.cpp
hotspot/src/share/vm/prims/jvmtiImpl.hpp
hotspot/src/share/vm/prims/jvmtiRawMonitor.cpp
hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp
hotspot/src/share/vm/prims/jvmtiTagMap.cpp
hotspot/src/share/vm/prims/jvmtiTagMap.hpp
hotspot/src/share/vm/prims/jvmtiThreadState.hpp
hotspot/src/share/vm/prims/jvmtiUtil.cpp
hotspot/src/share/vm/prims/unsafe.cpp
hotspot/src/share/vm/runtime/arguments.cpp
hotspot/src/share/vm/runtime/arguments.hpp
hotspot/src/share/vm/runtime/biasedLocking.cpp
hotspot/src/share/vm/runtime/compilationPolicy.hpp
hotspot/src/share/vm/runtime/deoptimization.cpp
hotspot/src/share/vm/runtime/deoptimization.hpp
hotspot/src/share/vm/runtime/dtraceJSDT.hpp
hotspot/src/share/vm/runtime/fprofiler.cpp
hotspot/src/share/vm/runtime/fprofiler.hpp
hotspot/src/share/vm/runtime/globals.cpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/handles.cpp
hotspot/src/share/vm/runtime/handles.hpp
hotspot/src/share/vm/runtime/handles.inline.hpp
hotspot/src/share/vm/runtime/java.cpp
hotspot/src/share/vm/runtime/jniHandles.hpp
hotspot/src/share/vm/runtime/monitorChunk.cpp
hotspot/src/share/vm/runtime/monitorChunk.hpp
hotspot/src/share/vm/runtime/mutex.hpp
hotspot/src/share/vm/runtime/os.cpp
hotspot/src/share/vm/runtime/os.hpp
hotspot/src/share/vm/runtime/osThread.hpp
hotspot/src/share/vm/runtime/park.cpp
hotspot/src/share/vm/runtime/perfData.cpp
hotspot/src/share/vm/runtime/perfData.hpp
hotspot/src/share/vm/runtime/perfMemory.cpp
hotspot/src/share/vm/runtime/reflectionUtils.cpp
hotspot/src/share/vm/runtime/safepoint.cpp
hotspot/src/share/vm/runtime/safepoint.hpp
hotspot/src/share/vm/runtime/sharedRuntime.cpp
hotspot/src/share/vm/runtime/sharedRuntime.hpp
hotspot/src/share/vm/runtime/stubCodeGenerator.hpp
hotspot/src/share/vm/runtime/sweeper.cpp
hotspot/src/share/vm/runtime/task.hpp
hotspot/src/share/vm/runtime/thread.cpp
hotspot/src/share/vm/runtime/thread.hpp
hotspot/src/share/vm/runtime/unhandledOops.cpp
hotspot/src/share/vm/runtime/vframeArray.cpp
hotspot/src/share/vm/runtime/vframeArray.hpp
hotspot/src/share/vm/runtime/vframe_hp.cpp
hotspot/src/share/vm/runtime/vframe_hp.hpp
hotspot/src/share/vm/runtime/virtualspace.cpp
hotspot/src/share/vm/runtime/vmStructs.cpp
hotspot/src/share/vm/runtime/vmThread.hpp
hotspot/src/share/vm/runtime/vm_operations.hpp
hotspot/src/share/vm/services/attachListener.cpp
hotspot/src/share/vm/services/attachListener.hpp
hotspot/src/share/vm/services/diagnosticArgument.cpp
hotspot/src/share/vm/services/diagnosticArgument.hpp
hotspot/src/share/vm/services/diagnosticFramework.hpp
hotspot/src/share/vm/services/gcNotifier.cpp
hotspot/src/share/vm/services/gcNotifier.hpp
hotspot/src/share/vm/services/heapDumper.cpp
hotspot/src/share/vm/services/lowMemoryDetector.hpp
hotspot/src/share/vm/services/management.cpp
hotspot/src/share/vm/services/memBaseline.cpp
hotspot/src/share/vm/services/memBaseline.hpp
hotspot/src/share/vm/services/memPtr.cpp
hotspot/src/share/vm/services/memPtr.hpp
hotspot/src/share/vm/services/memPtrArray.hpp
hotspot/src/share/vm/services/memRecorder.cpp
hotspot/src/share/vm/services/memRecorder.hpp
hotspot/src/share/vm/services/memReporter.cpp
hotspot/src/share/vm/services/memReporter.hpp
hotspot/src/share/vm/services/memSnapshot.cpp
hotspot/src/share/vm/services/memSnapshot.hpp
hotspot/src/share/vm/services/memTrackWorker.cpp
hotspot/src/share/vm/services/memTrackWorker.hpp
hotspot/src/share/vm/services/memTracker.cpp
hotspot/src/share/vm/services/memTracker.hpp
hotspot/src/share/vm/services/memoryManager.cpp
hotspot/src/share/vm/services/memoryManager.hpp
hotspot/src/share/vm/services/memoryPool.hpp
hotspot/src/share/vm/services/memoryService.cpp
hotspot/src/share/vm/services/nmtDCmd.cpp
hotspot/src/share/vm/services/nmtDCmd.hpp
hotspot/src/share/vm/services/threadService.cpp
hotspot/src/share/vm/services/threadService.hpp
hotspot/src/share/vm/utilities/array.cpp
hotspot/src/share/vm/utilities/array.hpp
hotspot/src/share/vm/utilities/bitMap.cpp
hotspot/src/share/vm/utilities/decoder.hpp
hotspot/src/share/vm/utilities/elfFile.cpp
hotspot/src/share/vm/utilities/elfFile.hpp
hotspot/src/share/vm/utilities/elfStringTable.cpp
hotspot/src/share/vm/utilities/elfStringTable.hpp
hotspot/src/share/vm/utilities/elfSymbolTable.cpp
hotspot/src/share/vm/utilities/elfSymbolTable.hpp
hotspot/src/share/vm/utilities/events.hpp
hotspot/src/share/vm/utilities/exceptions.hpp
hotspot/src/share/vm/utilities/growableArray.cpp
hotspot/src/share/vm/utilities/growableArray.hpp
hotspot/src/share/vm/utilities/hashtable.cpp
hotspot/src/share/vm/utilities/hashtable.hpp
hotspot/src/share/vm/utilities/hashtable.inline.hpp
hotspot/src/share/vm/utilities/histogram.cpp
hotspot/src/share/vm/utilities/histogram.hpp
hotspot/src/share/vm/utilities/intHisto.cpp
hotspot/src/share/vm/utilities/intHisto.hpp
hotspot/src/share/vm/utilities/numberSeq.cpp
hotspot/src/share/vm/utilities/numberSeq.hpp
hotspot/src/share/vm/utilities/ostream.cpp
hotspot/src/share/vm/utilities/stack.hpp
hotspot/src/share/vm/utilities/stack.inline.hpp
hotspot/src/share/vm/utilities/taskqueue.hpp
hotspot/src/share/vm/utilities/vmError.cpp
hotspot/src/share/vm/utilities/workgroup.cpp
hotspot/src/share/vm/utilities/workgroup.hpp
hotspot/src/share/vm/utilities/xmlstream.cpp
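
The bulk of this changeset applies a single convention across the files listed above: each C-heap allocation site is tagged with an NMT memory-type flag so the tracker can attribute native memory to a VM subsystem. Three recurring forms show up in the hunks below: NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY gain a trailing flag argument, CHeapObj becomes a class template parameterized by the flag, and placement new of ResourceObj::C_HEAP objects passes the flag as well. The following is a minimal sketch of the convention as it appears in these hunks, not a standalone program; the macros and CHeapObj<> come from memory/allocation.hpp (changed by this patch), GrowableArray from utilities/growableArray.hpp, and mtInternal is the flag used throughout the OS and perf-memory code here.

    // Tagged C-heap array: allocated and freed with the same memory-type flag.
    char* buf = NEW_C_HEAP_ARRAY(char, 1024, mtInternal);
    FREE_C_HEAP_ARRAY(char, buf, mtInternal);

    // Tagged C-heap object: the base-class template records the category.
    class ExampleEvent : public CHeapObj<mtInternal> {
      volatile int _state;
    };

    // Tagged resource-object placement new, as in the os_linux.cpp NUMA hunk.
    GrowableArray<int>* cpu_to_node =
        new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
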
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/BasicHashtable.java	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/BasicHashtable.java	Thu Jun 28 17:03:16 2012 -0400
@@ -41,10 +41,10 @@
   }
 
   private static synchronized void initialize(TypeDataBase db) {
-    Type type = db.lookupType("BasicHashtable");
+    Type type = db.lookupType("BasicHashtable<mtInternal>");
     tableSizeField = type.getCIntegerField("_table_size");
     bucketsField   = type.getAddressField("_buckets");
-    bucketSize = db.lookupType("HashtableBucket").getSize();
+    bucketSize = db.lookupType("HashtableBucket<mtInternal>").getSize();
   }
 
   // Fields
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/BasicHashtableEntry.java	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/BasicHashtableEntry.java	Thu Jun 28 17:03:16 2012 -0400
@@ -41,7 +41,7 @@
   }
 
   private static synchronized void initialize(TypeDataBase db) {
-    Type type = db.lookupType("BasicHashtableEntry");
+    Type type = db.lookupType("BasicHashtableEntry<mtInternal>");
     hashField      = type.getCIntegerField("_hash");
     nextField      = type.getAddressField("_next");
   }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java	Thu Jun 28 17:03:16 2012 -0400
@@ -40,7 +40,7 @@
 
   private static synchronized void initialize(TypeDataBase db) {
     // just to confirm that type exists
-    Type type = db.lookupType("Hashtable<intptr_t>");
+    Type type = db.lookupType("IntptrHashtable");
   }
 
   // derived class may return Class<? extends HashtableEntry>
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableBucket.java	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableBucket.java	Thu Jun 28 17:03:16 2012 -0400
@@ -39,7 +39,7 @@
   }
 
   private static synchronized void initialize(TypeDataBase db) {
-    Type type = db.lookupType("HashtableBucket");
+    Type type = db.lookupType("HashtableBucket<mtInternal>");
     entryField = type.getAddressField("_entry");
   }
 
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableEntry.java	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableEntry.java	Thu Jun 28 17:03:16 2012 -0400
@@ -41,7 +41,7 @@
   }
 
   private static synchronized void initialize(TypeDataBase db) {
-    Type type = db.lookupType("HashtableEntry<intptr_t>");
+    Type type = db.lookupType("IntptrHashtableEntry");
     literalField   = type.getAddressField("_literal");
   }
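
The serviceability-agent changes above follow from turning the VM hashtable classes into templates over the memory-type flag: the agent must now look up the instantiated names (BasicHashtable<mtInternal>, HashtableBucket<mtInternal>, BasicHashtableEntry<mtInternal>) and, for the intptr_t tables, the typedef names IntptrHashtable and IntptrHashtableEntry. A hedged sketch of what such typedefs might look like on the C++ side is shown below; the real definitions live in utilities/hashtable.hpp (changed by this patch), and the flag bound here is an assumption.

    // Hypothetical sketch of the typedef names the agent now looks up; the
    // actual definitions (and the flag they bind) are in utilities/hashtable.hpp.
    typedef Hashtable<intptr_t, mtInternal>      IntptrHashtable;
    typedef HashtableEntry<intptr_t, mtInternal> IntptrHashtableEntry;
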
 
--- a/hotspot/make/bsd/makefiles/jvmg.make	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/make/bsd/makefiles/jvmg.make	Thu Jun 28 17:03:16 2012 -0400
@@ -27,7 +27,9 @@
 # Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make
 DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS)
 DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@))
-CFLAGS += $(DEBUG_CFLAGS/BYFILE)
+
+# _NMT_NOINLINE_ informs NMT that no inlining by Compiler
+CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_
 
 # Set the environment variable HOTSPARC_GENERIC to "true"
 # to inhibit the effect of the previous line on CFLAGS.
--- a/hotspot/make/linux/makefiles/jvmg.make	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/make/linux/makefiles/jvmg.make	Thu Jun 28 17:03:16 2012 -0400
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,9 @@
 # Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make
 DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS)
 DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@))
-CFLAGS += $(DEBUG_CFLAGS/BYFILE)
+
+# _NMT_NOINLINE_ informs NMT that no inlining by Compiler
+CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_
 
 # Set the environment variable HOTSPARC_GENERIC to "true"
 # to inhibit the effect of the previous line on CFLAGS.
--- a/hotspot/make/solaris/makefiles/jvmg.make	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/make/solaris/makefiles/jvmg.make	Thu Jun 28 17:03:16 2012 -0400
@@ -37,7 +37,8 @@
 endif
 endif
 
-CFLAGS += $(DEBUG_CFLAGS/BYFILE)
+# _NMT_NOINLINE_ informs NMT that no inlining by Compiler
+CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_
 
 # Set the environment variable HOTSPARC_GENERIC to "true"
 # to inhibit the effect of the previous line on CFLAGS.
--- a/hotspot/make/windows/makefiles/debug.make	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/make/windows/makefiles/debug.make	Thu Jun 28 17:03:16 2012 -0400
@@ -38,7 +38,8 @@
 !include ../local.make
 !include compile.make
 
-CXX_FLAGS=$(CXX_FLAGS) $(DEBUG_OPT_OPTION)
+# _NMT_NOINLINE_ informs NMT that no inlining by Compiler
+CXX_FLAGS=$(CXX_FLAGS) $(DEBUG_OPT_OPTION) /D "_NMT_NOINLINE_"
 
 !include $(WorkSpace)/make/windows/makefiles/vm.make
 !include local.make
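
The four makefile hunks above add the same define to debug builds only. A plausible reading, consistent with the os::get_caller_pc() change later in this patch, is that NMT walks the stack to record the allocation site, and when the allocation wrappers are not inlined (as in debug builds) one extra frame has to be skipped; _NMT_NOINLINE_ is how the build tells the runtime which case applies. A minimal C++ sketch of that adjustment, mirroring the #ifdef in os_posix.cpp below:

    // Sketch only: the non-inlined wrapper adds one stack frame in debug builds.
    int adjust_frame_skip(int n) {
    #ifdef _NMT_NOINLINE_
      n++;
    #endif
      return n;
    }
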
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -440,7 +440,7 @@
   // code needs to be changed accordingly.
 
   // The next few definitions allow the code to be verbatim:
-#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
+#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
 #define getenv(n) ::getenv(n)
 
 /*
@@ -1913,11 +1913,11 @@
     // release the storage
     for (int i = 0 ; i < n ; i++) {
       if (pelements[i] != NULL) {
-        FREE_C_HEAP_ARRAY(char, pelements[i]);
+        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
       }
     }
     if (pelements != NULL) {
-      FREE_C_HEAP_ARRAY(char*, pelements);
+      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
     }
   } else {
     snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX, pname, fname);
@@ -2766,7 +2766,7 @@
 //       All it does is to check if there are enough free pages
 //       left at the time of mmap(). This could be a potential
 //       problem.
-bool os::commit_memory(char* addr, size_t size, bool exec) {
+bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
 #ifdef __OpenBSD__
   // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
@@ -2790,7 +2790,7 @@
 #endif
 #endif
 
-bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
+bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                        bool exec) {
 #ifndef _ALLBSD_SOURCE
   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
@@ -2806,7 +2806,7 @@
   return commit_memory(addr, size, exec);
 }
 
-void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
+void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
 #ifndef _ALLBSD_SOURCE
   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
@@ -2816,7 +2816,7 @@
 #endif
 }
 
-void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
+void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
   ::madvise(addr, bytes, MADV_DONTNEED);
 }
 
@@ -2958,7 +2958,7 @@
 unsigned long* os::Bsd::_numa_all_nodes;
 #endif
 
-bool os::uncommit_memory(char* addr, size_t size) {
+bool os::pd_uncommit_memory(char* addr, size_t size) {
 #ifdef __OpenBSD__
   // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
   return ::mprotect(addr, size, PROT_NONE) == 0;
@@ -2969,7 +2969,7 @@
 #endif
 }
 
-bool os::create_stack_guard_pages(char* addr, size_t size) {
+bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
   return os::commit_memory(addr, size);
 }
 
@@ -3023,12 +3023,12 @@
   return ::munmap(addr, size) == 0;
 }
 
-char* os::reserve_memory(size_t bytes, char* requested_addr,
+char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
                          size_t alignment_hint) {
   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
 }
 
-bool os::release_memory(char* addr, size_t size) {
+bool os::pd_release_memory(char* addr, size_t size) {
   return anon_munmap(addr, size);
 }
 
@@ -3331,7 +3331,7 @@
 // Reserve memory at an arbitrary address, only if that area is
 // available (and not reserved for something else).
 
-char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
+char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
   const int max_tries = 10;
   char* base[max_tries];
   size_t size[max_tries];
@@ -4987,7 +4987,7 @@
 }
 
 // Map a block of memory.
-char* os::map_memory(int fd, const char* file_name, size_t file_offset,
+char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                      char *addr, size_t bytes, bool read_only,
                      bool allow_exec) {
   int prot;
@@ -5019,7 +5019,7 @@
 
 
 // Remap a block of memory.
-char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
+char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
   // same as map_memory() on this OS
@@ -5029,7 +5029,7 @@
 
 
 // Unmap a block of memory.
-bool os::unmap_memory(char* addr, size_t bytes) {
+bool os::pd_unmap_memory(char* addr, size_t bytes) {
   return munmap(addr, bytes) == 0;
 }
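
A second pattern in os_bsd.cpp (repeated in the Linux and Solaris files below) is the renaming of the platform virtual-memory primitives, os::commit_memory, os::reserve_memory and friends, to os::pd_commit_memory, os::pd_reserve_memory, and so on. The shared os layer (os.hpp/os.cpp, both in the file list) can then keep the old names as thin front ends that delegate to the pd_ implementations and report the operation for tracking, which is why code inside this file, such as pd_create_stack_guard_pages, still calls os::commit_memory. A hedged sketch of that front-end shape, with a placeholder rather than the actual NMT recording API:

    // Sketch of the shared wrapper the pd_ renaming enables; the real wrappers
    // are in os.cpp/os.hpp, and the recording call below is a hypothetical
    // placeholder, not the actual MemTracker interface.
    bool os::commit_memory(char* addr, size_t size, bool exec) {
      bool res = pd_commit_memory(addr, size, exec);   // platform-specific work
      if (res) {
        record_virtual_memory_commit(addr, size);      // placeholder for NMT recording
      }
      return res;
    }
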
 
--- a/hotspot/src/os/bsd/vm/os_bsd.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -312,7 +312,7 @@
 };
 
 
-class PlatformEvent : public CHeapObj {
+class PlatformEvent : public CHeapObj<mtInternal> {
   private:
     double CachePad [4] ;   // increase odds that _mutex is sole occupant of cache line
     volatile int _Event ;
@@ -347,7 +347,7 @@
     void SetAssociation (Thread * a) { _Assoc = a ; }
 } ;
 
-class PlatformParker : public CHeapObj {
+class PlatformParker : public CHeapObj<mtInternal> {
   protected:
     pthread_mutex_t _mutex [1] ;
     pthread_cond_t  _cond  [1] ;
--- a/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -95,7 +95,7 @@
 
 
 // On Bsd, reservations are made on a page by page basis, nothing to do.
-inline void os::split_reserved_memory(char *base, size_t size,
+inline void os::pd_split_reserved_memory(char *base, size_t size,
                                       size_t split, bool realloc) {
 }
 
--- a/hotspot/src/os/bsd/vm/perfMemory_bsd.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/bsd/vm/perfMemory_bsd.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -126,7 +126,7 @@
       }
     }
   }
-  FREE_C_HEAP_ARRAY(char, destfile);
+  FREE_C_HEAP_ARRAY(char, destfile, mtInternal);
 }
 
 
@@ -153,7 +153,7 @@
   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
   size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
-  char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* dirname = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
 
   // construct the path name to user specific tmp directory
   snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);
@@ -246,7 +246,7 @@
   if (bufsize == -1)
     bufsize = 1024;
 
-  char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize);
+  char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 
   // POSIX interface to getpwuid_r is used on LINUX
   struct passwd* p;
@@ -278,14 +278,14 @@
                                      "pw_name zero length");
       }
     }
-    FREE_C_HEAP_ARRAY(char, pwbuf);
+    FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
     return NULL;
   }
 
-  char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1);
+  char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1, mtInternal);
   strcpy(user_name, p->pw_name);
 
-  FREE_C_HEAP_ARRAY(char, pwbuf);
+  FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
   return user_name;
 }
 
@@ -328,7 +328,7 @@
   // to determine the user name for the process id.
   //
   struct dirent* dentry;
-  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname));
+  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal);
   errno = 0;
   while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) {
 
@@ -338,7 +338,7 @@
     }
 
     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
+                 strlen(tmpdirname) + strlen(dentry->d_name) + 2, mtInternal);
     strcpy(usrdir_name, tmpdirname);
     strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
@@ -346,7 +346,7 @@
     DIR* subdirp = os::opendir(usrdir_name);
 
     if (subdirp == NULL) {
-      FREE_C_HEAP_ARRAY(char, usrdir_name);
+      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
       continue;
     }
 
@@ -357,13 +357,13 @@
     // symlink can be exploited.
     //
     if (!is_directory_secure(usrdir_name)) {
-      FREE_C_HEAP_ARRAY(char, usrdir_name);
+      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
       os::closedir(subdirp);
       continue;
     }
 
     struct dirent* udentry;
-    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name));
+    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal);
     errno = 0;
     while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) {
 
@@ -372,7 +372,7 @@
         int result;
 
         char* filename = NEW_C_HEAP_ARRAY(char,
-                            strlen(usrdir_name) + strlen(udentry->d_name) + 2);
+                 strlen(usrdir_name) + strlen(udentry->d_name) + 2, mtInternal);
 
         strcpy(filename, usrdir_name);
         strcat(filename, "/");
@@ -381,13 +381,13 @@
         // don't follow symbolic links for the file
         RESTARTABLE(::lstat(filename, &statbuf), result);
         if (result == OS_ERR) {
-           FREE_C_HEAP_ARRAY(char, filename);
+           FREE_C_HEAP_ARRAY(char, filename, mtInternal);
            continue;
         }
 
         // skip over files that are not regular files.
         if (!S_ISREG(statbuf.st_mode)) {
-          FREE_C_HEAP_ARRAY(char, filename);
+          FREE_C_HEAP_ARRAY(char, filename, mtInternal);
           continue;
         }
 
@@ -397,23 +397,23 @@
           if (statbuf.st_ctime > oldest_ctime) {
             char* user = strchr(dentry->d_name, '_') + 1;
 
-            if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user);
-            oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1);
+            if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user, mtInternal);
+            oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
 
             strcpy(oldest_user, user);
             oldest_ctime = statbuf.st_ctime;
           }
         }
 
-        FREE_C_HEAP_ARRAY(char, filename);
+        FREE_C_HEAP_ARRAY(char, filename, mtInternal);
       }
     }
     os::closedir(subdirp);
-    FREE_C_HEAP_ARRAY(char, udbuf);
-    FREE_C_HEAP_ARRAY(char, usrdir_name);
+    FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
+    FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
   }
   os::closedir(tmpdirp);
-  FREE_C_HEAP_ARRAY(char, tdbuf);
+  FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
 
   return(oldest_user);
 }
@@ -434,7 +434,7 @@
   // add 2 for the file separator and a null terminator.
   size_t nbytes = strlen(dirname) + UINT_CHARS + 2;
 
-  char* name = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
   snprintf(name, nbytes, "%s/%d", dirname, vmid);
 
   return name;
@@ -472,7 +472,7 @@
 static void remove_file(const char* dirname, const char* filename) {
 
   size_t nbytes = strlen(dirname) + strlen(filename) + 2;
-  char* path = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
 
   strcpy(path, dirname);
   strcat(path, "/");
@@ -480,7 +480,7 @@
 
   remove_file(path);
 
-  FREE_C_HEAP_ARRAY(char, path);
+  FREE_C_HEAP_ARRAY(char, path, mtInternal);
 }
 
 
@@ -517,7 +517,7 @@
   // opendir/readdir.
   //
   struct dirent* entry;
-  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname));
+  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
   errno = 0;
   while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
 
@@ -556,7 +556,7 @@
     errno = 0;
   }
   os::closedir(dirp);
-  FREE_C_HEAP_ARRAY(char, dbuf);
+  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
 }
 
 // make the user specific temporary directory. Returns true if
@@ -723,11 +723,11 @@
 
   fd = create_sharedmem_resources(dirname, filename, size);
 
-  FREE_C_HEAP_ARRAY(char, user_name);
-  FREE_C_HEAP_ARRAY(char, dirname);
+  FREE_C_HEAP_ARRAY(char, user_name, mtInternal);
+  FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
 
   if (fd == -1) {
-    FREE_C_HEAP_ARRAY(char, filename);
+    FREE_C_HEAP_ARRAY(char, filename, mtInternal);
     return NULL;
   }
 
@@ -743,7 +743,7 @@
       warning("mmap failed -  %s\n", strerror(errno));
     }
     remove_file(filename);
-    FREE_C_HEAP_ARRAY(char, filename);
+    FREE_C_HEAP_ARRAY(char, filename, mtInternal);
     return NULL;
   }
 
@@ -869,7 +869,7 @@
   // store file, we don't follow them when attaching either.
   //
   if (!is_directory_secure(dirname)) {
-    FREE_C_HEAP_ARRAY(char, dirname);
+    FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "Process not found");
   }
@@ -884,9 +884,9 @@
   strcpy(rfilename, filename);
 
   // free the c heap resources that are no longer needed
-  if (luser != user) FREE_C_HEAP_ARRAY(char, luser);
-  FREE_C_HEAP_ARRAY(char, dirname);
-  FREE_C_HEAP_ARRAY(char, filename);
+  if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal);
+  FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+  FREE_C_HEAP_ARRAY(char, filename, mtInternal);
 
   // open the shared memory file for the give vmid
   fd = open_sharedmem_file(rfilename, file_flags, CHECK);
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -371,7 +371,7 @@
   // code needs to be changed accordingly.
 
   // The next few definitions allow the code to be verbatim:
-#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
+#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
 #define getenv(n) ::getenv(n)
 
 /*
@@ -639,7 +639,7 @@
 
   size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
   if (n > 0) {
-     char *str = (char *)malloc(n);
+     char *str = (char *)malloc(n, mtInternal);
      confstr(_CS_GNU_LIBC_VERSION, str, n);
      os::Linux::set_glibc_version(str);
   } else {
@@ -652,7 +652,7 @@
 
   n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
   if (n > 0) {
-     char *str = (char *)malloc(n);
+     char *str = (char *)malloc(n, mtInternal);
      confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
      // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
      // us "NPTL-0.29" even we are running with LinuxThreads. Check if this
@@ -1685,11 +1685,11 @@
     // release the storage
     for (int i = 0 ; i < n ; i++) {
       if (pelements[i] != NULL) {
-        FREE_C_HEAP_ARRAY(char, pelements[i]);
+        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
       }
     }
     if (pelements != NULL) {
-      FREE_C_HEAP_ARRAY(char*, pelements);
+      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
     }
   } else {
     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
@@ -2469,7 +2469,7 @@
 //       All it does is to check if there are enough free pages
 //       left at the time of mmap(). This could be a potential
 //       problem.
-bool os::commit_memory(char* addr, size_t size, bool exec) {
+bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                    MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
@@ -2492,7 +2492,7 @@
 #define MADV_HUGEPAGE 14
 #endif
 
-bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
+bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                        bool exec) {
   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
     int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
@@ -2516,7 +2516,7 @@
   return false;
 }
 
-void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
+void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
     // be supported or the memory may already be backed by huge pages.
@@ -2524,7 +2524,7 @@
   }
 }
 
-void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
+void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
   // This method works by doing an mmap over an existing mmaping and effectively discarding
   // the existing pages. However it won't work for SHM-based large pages that cannot be
   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
@@ -2646,7 +2646,7 @@
       if (numa_available() != -1) {
         set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
         // Create a cpu -> node mapping
-        _cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray<int>(0, true);
+        _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
         rebuild_cpu_to_node_map();
         return true;
       }
@@ -2676,7 +2676,7 @@
   cpu_to_node()->at_grow(cpu_num - 1);
   size_t node_num = numa_get_groups_num();
 
-  unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size);
+  unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal);
   for (size_t i = 0; i < node_num; i++) {
     if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
       for (size_t j = 0; j < cpu_map_valid_size; j++) {
@@ -2690,7 +2690,7 @@
       }
     }
   }
-  FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
+  FREE_C_HEAP_ARRAY(unsigned long, cpu_map, mtInternal);
 }
 
 int os::Linux::get_node_by_cpu(int cpu_id) {
@@ -2709,7 +2709,7 @@
 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
 unsigned long* os::Linux::_numa_all_nodes;
 
-bool os::uncommit_memory(char* addr, size_t size) {
+bool os::pd_uncommit_memory(char* addr, size_t size) {
   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
   return res  != (uintptr_t) MAP_FAILED;
@@ -2774,7 +2774,7 @@
 // munmap() the guard pages we don't leave a hole in the stack
 // mapping. This only affects the main/initial thread, but guard
 // against future OS changes
-bool os::create_stack_guard_pages(char* addr, size_t size) {
+bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
   uintptr_t stack_extent, stack_base;
   bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
   if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
@@ -2847,12 +2847,12 @@
   return ::munmap(addr, size) == 0;
 }
 
-char* os::reserve_memory(size_t bytes, char* requested_addr,
+char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
                          size_t alignment_hint) {
   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
 }
 
-bool os::release_memory(char* addr, size_t size) {
+bool os::pd_release_memory(char* addr, size_t size) {
   return anon_munmap(addr, size);
 }
 
@@ -3149,7 +3149,7 @@
 // Reserve memory at an arbitrary address, only if that area is
 // available (and not reserved for something else).
 
-char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
+char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
   const int max_tries = 10;
   char* base[max_tries];
   size_t size[max_tries];
@@ -4671,7 +4671,7 @@
 }
 
 // Map a block of memory.
-char* os::map_memory(int fd, const char* file_name, size_t file_offset,
+char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                      char *addr, size_t bytes, bool read_only,
                      bool allow_exec) {
   int prot;
@@ -4701,7 +4701,7 @@
 
 
 // Remap a block of memory.
-char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
+char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
   // same as map_memory() on this OS
@@ -4711,7 +4711,7 @@
 
 
 // Unmap a block of memory.
-bool os::unmap_memory(char* addr, size_t bytes) {
+bool os::pd_unmap_memory(char* addr, size_t bytes) {
   return munmap(addr, bytes) == 0;
 }
 
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -287,7 +287,7 @@
 };
 
 
-class PlatformEvent : public CHeapObj {
+class PlatformEvent : public CHeapObj<mtInternal> {
   private:
     double CachePad [4] ;   // increase odds that _mutex is sole occupant of cache line
     volatile int _Event ;
@@ -322,7 +322,7 @@
     void SetAssociation (Thread * a) { _Assoc = a ; }
 } ;
 
-class PlatformParker : public CHeapObj {
+class PlatformParker : public CHeapObj<mtInternal> {
   protected:
     pthread_mutex_t _mutex [1] ;
     pthread_cond_t  _cond  [1] ;
--- a/hotspot/src/os/linux/vm/os_linux.inline.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.inline.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -99,7 +99,7 @@
 
 
 // On Linux, reservations are made on a page by page basis, nothing to do.
-inline void os::split_reserved_memory(char *base, size_t size,
+inline void os::pd_split_reserved_memory(char *base, size_t size,
                                       size_t split, bool realloc) {
 }
 
--- a/hotspot/src/os/linux/vm/perfMemory_linux.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/linux/vm/perfMemory_linux.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -126,7 +126,7 @@
       }
     }
   }
-  FREE_C_HEAP_ARRAY(char, destfile);
+  FREE_C_HEAP_ARRAY(char, destfile, mtInternal);
 }
 
 
@@ -153,7 +153,7 @@
   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
   size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
-  char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* dirname = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
 
   // construct the path name to user specific tmp directory
   snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);
@@ -246,7 +246,7 @@
   if (bufsize == -1)
     bufsize = 1024;
 
-  char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize);
+  char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 
   // POSIX interface to getpwuid_r is used on LINUX
   struct passwd* p;
@@ -278,14 +278,14 @@
                                      "pw_name zero length");
       }
     }
-    FREE_C_HEAP_ARRAY(char, pwbuf);
+    FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
     return NULL;
   }
 
-  char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1);
+  char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1, mtInternal);
   strcpy(user_name, p->pw_name);
 
-  FREE_C_HEAP_ARRAY(char, pwbuf);
+  FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
   return user_name;
 }
 
@@ -328,7 +328,7 @@
   // to determine the user name for the process id.
   //
   struct dirent* dentry;
-  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname));
+  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal);
   errno = 0;
   while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) {
 
@@ -338,7 +338,7 @@
     }
 
     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
+                     strlen(tmpdirname) + strlen(dentry->d_name) + 2, mtInternal);
     strcpy(usrdir_name, tmpdirname);
     strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
@@ -346,7 +346,7 @@
     DIR* subdirp = os::opendir(usrdir_name);
 
     if (subdirp == NULL) {
-      FREE_C_HEAP_ARRAY(char, usrdir_name);
+      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
       continue;
     }
 
@@ -357,13 +357,13 @@
     // symlink can be exploited.
     //
     if (!is_directory_secure(usrdir_name)) {
-      FREE_C_HEAP_ARRAY(char, usrdir_name);
+      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
       os::closedir(subdirp);
       continue;
     }
 
     struct dirent* udentry;
-    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name));
+    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal);
     errno = 0;
     while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) {
 
@@ -372,7 +372,7 @@
         int result;
 
         char* filename = NEW_C_HEAP_ARRAY(char,
-                            strlen(usrdir_name) + strlen(udentry->d_name) + 2);
+                   strlen(usrdir_name) + strlen(udentry->d_name) + 2, mtInternal);
 
         strcpy(filename, usrdir_name);
         strcat(filename, "/");
@@ -381,13 +381,13 @@
         // don't follow symbolic links for the file
         RESTARTABLE(::lstat(filename, &statbuf), result);
         if (result == OS_ERR) {
-           FREE_C_HEAP_ARRAY(char, filename);
+           FREE_C_HEAP_ARRAY(char, filename, mtInternal);
            continue;
         }
 
         // skip over files that are not regular files.
         if (!S_ISREG(statbuf.st_mode)) {
-          FREE_C_HEAP_ARRAY(char, filename);
+          FREE_C_HEAP_ARRAY(char, filename, mtInternal);
           continue;
         }
 
@@ -397,23 +397,23 @@
           if (statbuf.st_ctime > oldest_ctime) {
             char* user = strchr(dentry->d_name, '_') + 1;
 
-            if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user);
-            oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1);
+            if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user, mtInternal);
+            oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
 
             strcpy(oldest_user, user);
             oldest_ctime = statbuf.st_ctime;
           }
         }
 
-        FREE_C_HEAP_ARRAY(char, filename);
+        FREE_C_HEAP_ARRAY(char, filename, mtInternal);
       }
     }
     os::closedir(subdirp);
-    FREE_C_HEAP_ARRAY(char, udbuf);
-    FREE_C_HEAP_ARRAY(char, usrdir_name);
+    FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
+    FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
   }
   os::closedir(tmpdirp);
-  FREE_C_HEAP_ARRAY(char, tdbuf);
+  FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
 
   return(oldest_user);
 }
@@ -434,7 +434,7 @@
   // add 2 for the file separator and a null terminator.
   size_t nbytes = strlen(dirname) + UINT_CHARS + 2;
 
-  char* name = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
   snprintf(name, nbytes, "%s/%d", dirname, vmid);
 
   return name;
@@ -472,7 +472,7 @@
 static void remove_file(const char* dirname, const char* filename) {
 
   size_t nbytes = strlen(dirname) + strlen(filename) + 2;
-  char* path = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
 
   strcpy(path, dirname);
   strcat(path, "/");
@@ -480,7 +480,7 @@
 
   remove_file(path);
 
-  FREE_C_HEAP_ARRAY(char, path);
+  FREE_C_HEAP_ARRAY(char, path, mtInternal);
 }
 
 
@@ -517,7 +517,7 @@
   // opendir/readdir.
   //
   struct dirent* entry;
-  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname));
+  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
   errno = 0;
   while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
 
@@ -556,7 +556,7 @@
     errno = 0;
   }
   os::closedir(dirp);
-  FREE_C_HEAP_ARRAY(char, dbuf);
+  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
 }
 
 // make the user specific temporary directory. Returns true if
@@ -723,11 +723,11 @@
 
   fd = create_sharedmem_resources(dirname, filename, size);
 
-  FREE_C_HEAP_ARRAY(char, user_name);
-  FREE_C_HEAP_ARRAY(char, dirname);
+  FREE_C_HEAP_ARRAY(char, user_name, mtInternal);
+  FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
 
   if (fd == -1) {
-    FREE_C_HEAP_ARRAY(char, filename);
+    FREE_C_HEAP_ARRAY(char, filename, mtInternal);
     return NULL;
   }
 
@@ -743,7 +743,7 @@
       warning("mmap failed -  %s\n", strerror(errno));
     }
     remove_file(filename);
-    FREE_C_HEAP_ARRAY(char, filename);
+    FREE_C_HEAP_ARRAY(char, filename, mtInternal);
     return NULL;
   }
 
@@ -869,7 +869,7 @@
   // store file, we don't follow them when attaching either.
   //
   if (!is_directory_secure(dirname)) {
-    FREE_C_HEAP_ARRAY(char, dirname);
+    FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "Process not found");
   }
@@ -884,9 +884,9 @@
   strcpy(rfilename, filename);
 
   // free the c heap resources that are no longer needed
-  if (luser != user) FREE_C_HEAP_ARRAY(char, luser);
-  FREE_C_HEAP_ARRAY(char, dirname);
-  FREE_C_HEAP_ARRAY(char, filename);
+  if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal);
+  FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+  FREE_C_HEAP_ARRAY(char, filename, mtInternal);
 
   // open the shared memory file for the give vmid
   fd = open_sharedmem_file(rfilename, file_flags, CHECK);
--- a/hotspot/src/os/posix/vm/os_posix.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/posix/vm/os_posix.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -23,6 +23,7 @@
 */
 
 #include "prims/jvm.h"
+#include "runtime/frame.inline.hpp"
 #include "runtime/os.hpp"
 #include "utilities/vmError.hpp"
 
@@ -61,6 +62,23 @@
   VMError::report_coredump_status(buffer, success);
 }
 
+address os::get_caller_pc(int n) {
+#ifdef _NMT_NOINLINE_
+  n ++;
+#endif
+  frame fr = os::current_frame();
+  while (n > 0 && fr.pc() &&
+    !os::is_first_C_frame(&fr) && fr.sender_pc()) {
+    fr = os::get_sender_for_C_frame(&fr);
+    n --;
+  }
+  if (n == 0) {
+    return fr.pc();
+  } else {
+    return NULL;
+  }
+}
+
 int os::get_last_error() {
   return errno;
 }
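
The new os::get_caller_pc(n) above walks n frames up from the current frame, stopping early at the first C frame or a frame without a sender pc, and returns NULL if it cannot go the full distance; the extra increment under _NMT_NOINLINE_ matches the debug-build defines added in the makefiles earlier in this patch. A usage sketch (the argument value is illustrative only):

    // Illustrative only: capture the pc two frames above the current one,
    // e.g. the caller of an allocation wrapper; NULL if the walk stops early.
    address pc = os::get_caller_pc(2);
    if (pc != NULL) {
      // associate pc with the allocation or mapping being tracked
    }
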
--- a/hotspot/src/os/solaris/dtrace/hs_private.d	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/solaris/dtrace/hs_private.d	Thu Jun 28 17:03:16 2012 -0400
@@ -23,7 +23,6 @@
  */
 
 provider hs_private {
-  probe hashtable__new_entry(void*, uintptr_t, void*); 
   probe safepoint__begin();
   probe safepoint__end();
   probe cms__initmark__begin();
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -546,7 +546,7 @@
   // Find the number of processors in the processor set.
   if (pset_info(pset, NULL, id_length, NULL) == 0) {
     // Make up an array to hold their ids.
-    *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
+    *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
     // Fill in the array with their processor ids.
     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
       result = true;
@@ -577,7 +577,7 @@
   // Find the number of processors online.
   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
   // Make up an array to hold their ids.
-  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
+  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
   // Processors need not be numbered consecutively.
   long found = 0;
   processorid_t next = 0;
@@ -629,7 +629,7 @@
   // The next id, to limit loops.
   const processorid_t limit_id = max_id + 1;
   // Make up markers for available processors.
-  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id);
+  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
   for (uint c = 0; c < limit_id; c += 1) {
     available_id[c] = false;
   }
@@ -666,7 +666,7 @@
     }
   }
   if (available_id != NULL) {
-    FREE_C_HEAP_ARRAY(bool, available_id);
+    FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
   }
   return true;
 }
@@ -698,7 +698,7 @@
     }
   }
   if (id_array != NULL) {
-    FREE_C_HEAP_ARRAY(processorid_t, id_array);
+    FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
   }
   return result;
 }
@@ -771,8 +771,8 @@
   // code needs to be changed accordingly.
 
   // The next few definitions allow the code to be verbatim:
-#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
-#define free(p) FREE_C_HEAP_ARRAY(char, p)
+#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
+#define free(p) FREE_C_HEAP_ARRAY(char, p, mtInternal)
 #define getenv(n) ::getenv(n)
 
 #define EXTENSIONS_DIR  "/lib/ext"
@@ -1927,11 +1927,11 @@
     // release the storage
     for (int i = 0 ; i < n ; i++) {
       if (pelements[i] != NULL) {
-        FREE_C_HEAP_ARRAY(char, pelements[i]);
+        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
       }
     }
     if (pelements != NULL) {
-      FREE_C_HEAP_ARRAY(char*, pelements);
+      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
     }
   } else {
     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
@@ -2662,17 +2662,17 @@
 
   // pending_signals has one int per signal
   // The additional signal is for SIGEXIT - exit signal to signal_thread
-  pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1));
+  pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
 
   if (UseSignalChaining) {
      chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
-       * (Maxsignum + 1));
+       * (Maxsignum + 1), mtInternal);
      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
-     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1));
+     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
   }
-  ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ));
+  ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal);
   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
 }
 
@@ -2760,7 +2760,7 @@
   return page_size;
 }
 
-bool os::commit_memory(char* addr, size_t bytes, bool exec) {
+bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   size_t size = bytes;
   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
@@ -2773,7 +2773,7 @@
   return false;
 }
 
-bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint,
+bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                        bool exec) {
   if (commit_memory(addr, bytes, exec)) {
     if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
@@ -2803,14 +2803,14 @@
 }
 
 // Uncommit the pages in a specified region.
-void os::free_memory(char* addr, size_t bytes, size_t alignment_hint) {
+void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
   if (madvise(addr, bytes, MADV_FREE) < 0) {
     debug_only(warning("MADV_FREE failed."));
     return;
   }
 }
 
-bool os::create_stack_guard_pages(char* addr, size_t size) {
+bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
   return os::commit_memory(addr, size);
 }
 
@@ -2819,7 +2819,7 @@
 }
 
 // Change the page size in a given range.
-void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
+void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
   if (UseLargePages && UseMPSS) {
@@ -3006,7 +3006,7 @@
   return end;
 }
 
-bool os::uncommit_memory(char* addr, size_t bytes) {
+bool os::pd_uncommit_memory(char* addr, size_t bytes) {
   size_t size = bytes;
   // Map uncommitted pages PROT_NONE so we fail early if we touch an
   // uncommitted page. Otherwise, the read/write might succeed if we
@@ -3045,7 +3045,7 @@
   return mmap_chunk(addr, bytes, flags, PROT_NONE);
 }
 
-char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
+char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
 
   guarantee(requested_addr == NULL || requested_addr == addr,
@@ -3056,7 +3056,7 @@
 // Reserve memory at an arbitrary address, only if that area is
 // available (and not reserved for something else).
 
-char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
+char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
   const int max_tries = 10;
   char* base[max_tries];
   size_t size[max_tries];
@@ -3178,7 +3178,7 @@
   return (i < max_tries) ? requested_addr : NULL;
 }
 
-bool os::release_memory(char* addr, size_t bytes) {
+bool os::pd_release_memory(char* addr, size_t bytes) {
   size_t size = bytes;
   return munmap(addr, size) == 0;
 }
@@ -4792,7 +4792,7 @@
   lwpSize = 16*1024;
   for (;;) {
     ::lseek64 (lwpFile, 0, SEEK_SET);
-    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize);
+    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
     if (::read(lwpFile, lwpArray, lwpSize) < 0) {
       if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
       break;
@@ -4810,10 +4810,10 @@
       break;
     }
     lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
-    FREE_C_HEAP_ARRAY(char, lwpArray);  // retry.
-  }
-
-  FREE_C_HEAP_ARRAY(char, lwpArray);
+    FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
+  }
+
+  FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
   ::close (lwpFile);
   if (ThreadPriorityVerbose) {
     if (isT2) tty->print_cr("We are running with a T2 libthread\n");
@@ -5137,9 +5137,9 @@
       UseNUMA = false;
     } else {
       size_t lgrp_limit = os::numa_get_groups_num();
-      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
+      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
-      FREE_C_HEAP_ARRAY(int, lgrp_ids);
+      FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
       if (lgrp_num < 2) {
         // There's only one locality group, disable NUMA.
         UseNUMA = false;
@@ -5485,7 +5485,7 @@
 }
 
 // Map a block of memory.
-char* os::map_memory(int fd, const char* file_name, size_t file_offset,
+char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                      char *addr, size_t bytes, bool read_only,
                      bool allow_exec) {
   int prot;
@@ -5517,7 +5517,7 @@
 
 
 // Remap a block of memory.
-char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
+char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
   // same as map_memory() on this OS
@@ -5527,7 +5527,7 @@
 
 
 // Unmap a block of memory.
-bool os::unmap_memory(char* addr, size_t bytes) {
+bool os::pd_unmap_memory(char* addr, size_t bytes) {
   return munmap(addr, bytes) == 0;
 }
 
--- a/hotspot/src/os/solaris/vm/os_solaris.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -346,7 +346,7 @@
 
 };
 
-class PlatformEvent : public CHeapObj {
+class PlatformEvent : public CHeapObj<mtInternal> {
   private:
     double CachePad [4] ;   // increase odds that _mutex is sole occupant of cache line
     volatile int _Event ;
@@ -383,7 +383,7 @@
     void unpark () ;
 } ;
 
-class PlatformParker : public CHeapObj {
+class PlatformParker : public CHeapObj<mtInternal> {
   protected:
     mutex_t _mutex [1] ;
     cond_t  _cond  [1] ;
--- a/hotspot/src/os/solaris/vm/os_solaris.inline.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.inline.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -71,7 +71,7 @@
 
 
 // On Solaris, reservations are made on a page by page basis, nothing to do.
-inline void os::split_reserved_memory(char *base, size_t size,
+inline void os::pd_split_reserved_memory(char *base, size_t size,
                                       size_t split, bool realloc) {
 }
 
--- a/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -128,7 +128,7 @@
       }
     }
   }
-  FREE_C_HEAP_ARRAY(char, destfile);
+  FREE_C_HEAP_ARRAY(char, destfile, mtInternal);
 }
 
 
@@ -155,7 +155,7 @@
   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
   size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
-  char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* dirname = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
 
   // construct the path name to user specific tmp directory
   snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);
@@ -248,7 +248,7 @@
   if (bufsize == -1)
     bufsize = 1024;
 
-  char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize);
+  char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 
 #ifdef _GNU_SOURCE
   struct passwd* p = NULL;
@@ -269,14 +269,14 @@
                                      "pw_name zero length");
       }
     }
-    FREE_C_HEAP_ARRAY(char, pwbuf);
+    FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
     return NULL;
   }
 
-  char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1);
+  char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1, mtInternal);
   strcpy(user_name, p->pw_name);
 
-  FREE_C_HEAP_ARRAY(char, pwbuf);
+  FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
   return user_name;
 }
 
@@ -319,7 +319,7 @@
   // to determine the user name for the process id.
   //
   struct dirent* dentry;
-  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname));
+  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal);
   errno = 0;
   while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) {
 
@@ -329,7 +329,7 @@
     }
 
     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
+                  strlen(tmpdirname) + strlen(dentry->d_name) + 2, mtInternal);
     strcpy(usrdir_name, tmpdirname);
     strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
@@ -337,7 +337,7 @@
     DIR* subdirp = os::opendir(usrdir_name);
 
     if (subdirp == NULL) {
-      FREE_C_HEAP_ARRAY(char, usrdir_name);
+      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
       continue;
     }
 
@@ -348,13 +348,13 @@
     // symlink can be exploited.
     //
     if (!is_directory_secure(usrdir_name)) {
-      FREE_C_HEAP_ARRAY(char, usrdir_name);
+      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
       os::closedir(subdirp);
       continue;
     }
 
     struct dirent* udentry;
-    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name));
+    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal);
     errno = 0;
     while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) {
 
@@ -363,7 +363,7 @@
         int result;
 
         char* filename = NEW_C_HEAP_ARRAY(char,
-                            strlen(usrdir_name) + strlen(udentry->d_name) + 2);
+                 strlen(usrdir_name) + strlen(udentry->d_name) + 2, mtInternal);
 
         strcpy(filename, usrdir_name);
         strcat(filename, "/");
@@ -372,13 +372,13 @@
         // don't follow symbolic links for the file
         RESTARTABLE(::lstat(filename, &statbuf), result);
         if (result == OS_ERR) {
-           FREE_C_HEAP_ARRAY(char, filename);
+           FREE_C_HEAP_ARRAY(char, filename, mtInternal);
            continue;
         }
 
         // skip over files that are not regular files.
         if (!S_ISREG(statbuf.st_mode)) {
-          FREE_C_HEAP_ARRAY(char, filename);
+          FREE_C_HEAP_ARRAY(char, filename, mtInternal);
           continue;
         }
 
@@ -388,23 +388,23 @@
           if (statbuf.st_ctime > oldest_ctime) {
             char* user = strchr(dentry->d_name, '_') + 1;
 
-            if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user);
-            oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1);
+            if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user, mtInternal);
+            oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
 
             strcpy(oldest_user, user);
             oldest_ctime = statbuf.st_ctime;
           }
         }
 
-        FREE_C_HEAP_ARRAY(char, filename);
+        FREE_C_HEAP_ARRAY(char, filename, mtInternal);
       }
     }
     os::closedir(subdirp);
-    FREE_C_HEAP_ARRAY(char, udbuf);
-    FREE_C_HEAP_ARRAY(char, usrdir_name);
+    FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
+    FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
   }
   os::closedir(tmpdirp);
-  FREE_C_HEAP_ARRAY(char, tdbuf);
+  FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
 
   return(oldest_user);
 }
@@ -471,7 +471,7 @@
   // add 2 for the file separator and a NULL terminator.
   size_t nbytes = strlen(dirname) + UINT_CHARS + 2;
 
-  char* name = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
   snprintf(name, nbytes, "%s/%d", dirname, vmid);
 
   return name;
@@ -509,7 +509,7 @@
 static void remove_file(const char* dirname, const char* filename) {
 
   size_t nbytes = strlen(dirname) + strlen(filename) + 2;
-  char* path = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
 
   strcpy(path, dirname);
   strcat(path, "/");
@@ -517,7 +517,7 @@
 
   remove_file(path);
 
-  FREE_C_HEAP_ARRAY(char, path);
+  FREE_C_HEAP_ARRAY(char, path, mtInternal);
 }
 
 
@@ -554,7 +554,7 @@
   // opendir/readdir.
   //
   struct dirent* entry;
-  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname));
+  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
   errno = 0;
   while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
 
@@ -593,7 +593,7 @@
     errno = 0;
   }
   os::closedir(dirp);
-  FREE_C_HEAP_ARRAY(char, dbuf);
+  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
 }
 
 // make the user specific temporary directory. Returns true if
@@ -738,11 +738,11 @@
 
   fd = create_sharedmem_resources(dirname, filename, size);
 
-  FREE_C_HEAP_ARRAY(char, user_name);
-  FREE_C_HEAP_ARRAY(char, dirname);
+  FREE_C_HEAP_ARRAY(char, user_name, mtInternal);
+  FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
 
   if (fd == -1) {
-    FREE_C_HEAP_ARRAY(char, filename);
+    FREE_C_HEAP_ARRAY(char, filename, mtInternal);
     return NULL;
   }
 
@@ -758,7 +758,7 @@
       warning("mmap failed -  %s\n", strerror(errno));
     }
     remove_file(filename);
-    FREE_C_HEAP_ARRAY(char, filename);
+    FREE_C_HEAP_ARRAY(char, filename, mtInternal);
     return NULL;
   }
 
@@ -884,7 +884,7 @@
   // store file, we don't follow them when attaching either.
   //
   if (!is_directory_secure(dirname)) {
-    FREE_C_HEAP_ARRAY(char, dirname);
+    FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "Process not found");
   }
@@ -899,9 +899,9 @@
   strcpy(rfilename, filename);
 
   // free the c heap resources that are no longer needed
-  if (luser != user) FREE_C_HEAP_ARRAY(char, luser);
-  FREE_C_HEAP_ARRAY(char, dirname);
-  FREE_C_HEAP_ARRAY(char, filename);
+  if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal);
+  FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+  FREE_C_HEAP_ARRAY(char, filename, mtInternal);
 
   // open the shared memory file for the given vmid
   fd = open_sharedmem_file(rfilename, file_flags, CHECK);
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -96,7 +96,6 @@
 #include <io.h>
 #include <process.h>              // For _beginthreadex(), _endthreadex()
 #include <imagehlp.h>             // For os::dll_address_to_function_name
-
 /* for enumerating dll libraries */
 #include <vdmdbg.h>
 
@@ -214,13 +213,13 @@
           }
       }
 
-      home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1);
+      home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
       if (home_path == NULL)
           return;
       strcpy(home_path, home_dir);
       Arguments::set_java_home(home_path);
 
-      dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1);
+      dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal);
       if (dll_path == NULL)
           return;
       strcpy(dll_path, home_dir);
@@ -251,7 +250,7 @@
     char *path_str = ::getenv("PATH");
 
     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
-        sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10);
+        sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 
     library_path[0] = '\0';
 
@@ -280,7 +279,7 @@
     strcat(library_path, ";.");
 
     Arguments::set_library_path(library_path);
-    FREE_C_HEAP_ARRAY(char, library_path);
+    FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
   }
 
   /* Default extensions directory */
@@ -300,7 +299,7 @@
   {
     #define ENDORSED_DIR "\\lib\\endorsed"
     size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
-    char * buf = NEW_C_HEAP_ARRAY(char, len);
+    char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
     sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
     Arguments::set_endorsed_dirs(buf);
     #undef ENDORSED_DIR
@@ -324,6 +323,23 @@
   os::breakpoint();
 }
 
+/*
+ * The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
+ * So far, this method is only used by Native Memory Tracking, which is
+ * only supported on Windows XP or later.
+ */
+address os::get_caller_pc(int n) {
+#ifdef _NMT_NOINLINE_
+  n ++;
+#endif
+  address pc;
+  if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
+    return pc;
+  }
+  return NULL;
+}
+
+
 // os::current_stack_base()
 //
 //   Returns the base of the stack, which is the stack's
@@ -1014,7 +1030,7 @@
 os::opendir(const char *dirname)
 {
     assert(dirname != NULL, "just checking");   // hotspot change
-    DIR *dirp = (DIR *)malloc(sizeof(DIR));
+    DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
     DWORD fattr;                                // hotspot change
     char alt_dirname[4] = { 0, 0, 0, 0 };
 
@@ -1036,9 +1052,9 @@
         dirname = alt_dirname;
     }
 
-    dirp->path = (char *)malloc(strlen(dirname) + 5);
+    dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
     if (dirp->path == 0) {
-        free(dirp);
+        free(dirp, mtInternal);
         errno = ENOMEM;
         return 0;
     }
@@ -1046,13 +1062,13 @@
 
     fattr = GetFileAttributes(dirp->path);
     if (fattr == 0xffffffff) {
-        free(dirp->path);
-        free(dirp);
+        free(dirp->path, mtInternal);
+        free(dirp, mtInternal);
         errno = ENOENT;
         return 0;
     } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
-        free(dirp->path);
-        free(dirp);
+        free(dirp->path, mtInternal);
+        free(dirp, mtInternal);
         errno = ENOTDIR;
         return 0;
     }
@@ -1070,8 +1086,8 @@
     dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
     if (dirp->handle == INVALID_HANDLE_VALUE) {
         if (GetLastError() != ERROR_FILE_NOT_FOUND) {
-            free(dirp->path);
-            free(dirp);
+            free(dirp->path, mtInternal);
+            free(dirp, mtInternal);
             errno = EACCES;
             return 0;
         }
@@ -1114,8 +1130,8 @@
         }
         dirp->handle = INVALID_HANDLE_VALUE;
     }
-    free(dirp->path);
-    free(dirp);
+    free(dirp->path, mtInternal);
+    free(dirp, mtInternal);
     return 0;
 }
 
@@ -1176,11 +1192,11 @@
     // release the storage
     for (int i = 0 ; i < n ; i++) {
       if (pelements[i] != NULL) {
-        FREE_C_HEAP_ARRAY(char, pelements[i]);
+        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
       }
     }
     if (pelements != NULL) {
-      FREE_C_HEAP_ARRAY(char*, pelements);
+      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
     }
   } else {
     jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
@@ -2637,7 +2653,7 @@
 
   void free_node_list() {
     if (_numa_used_node_list != NULL) {
-      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
+      FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
     }
   }
 
@@ -2659,7 +2675,7 @@
     ULONG highest_node_number;
     if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
     free_node_list();
-    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1);
+    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
     for (unsigned int i = 0; i <= highest_node_number; i++) {
       ULONGLONG proc_mask_numa_node;
       if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
@@ -2918,7 +2934,7 @@
 // On win32, one cannot release just a part of reserved memory, it's an
 // all or nothing deal.  When we split a reservation, we must break the
 // reservation into two reservations.
-void os::split_reserved_memory(char *base, size_t size, size_t split,
+void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
                               bool realloc) {
   if (size > 0) {
     release_memory(base, size);
@@ -2931,7 +2947,7 @@
   }
 }
 
-char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
+char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
   assert((size_t)addr % os::vm_allocation_granularity() == 0,
          "reserve alignment");
   assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
@@ -2964,7 +2980,7 @@
 
 // Reserve memory at an arbitrary address, only if that area is
 // available (and not reserved for something else).
-char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
+char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
   // Windows os::reserve_memory() fails if the requested address range is
   // not available.
   return reserve_memory(bytes, requested_addr);
@@ -3027,7 +3043,7 @@
 void os::print_statistics() {
 }
 
-bool os::commit_memory(char* addr, size_t bytes, bool exec) {
+bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
   if (bytes == 0) {
     // Don't bother the OS with noops.
     return true;
@@ -3075,26 +3091,26 @@
   return true;
 }
 
-bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
+bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                        bool exec) {
   return commit_memory(addr, size, exec);
 }
 
-bool os::uncommit_memory(char* addr, size_t bytes) {
+bool os::pd_uncommit_memory(char* addr, size_t bytes) {
   if (bytes == 0) {
     // Don't bother the OS with noops.
     return true;
   }
   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
-  return VirtualFree(addr, bytes, MEM_DECOMMIT) != 0;
-}
-
-bool os::release_memory(char* addr, size_t bytes) {
+  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
+}
+
+bool os::pd_release_memory(char* addr, size_t bytes) {
   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
 }
 
-bool os::create_stack_guard_pages(char* addr, size_t size) {
+bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
   return os::commit_memory(addr, size);
 }
 
@@ -3141,8 +3157,8 @@
   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
 }
 
-void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
-void os::free_memory(char *addr, size_t bytes, size_t alignment_hint)    { }
+void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
+void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
 void os::numa_make_global(char *addr, size_t bytes)    { }
 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
 bool os::numa_topology_changed()                       { return false; }
@@ -4276,14 +4292,14 @@
     numEvents = MAX_INPUT_EVENTS;
   }
 
-  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD));
+  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
   if (lpBuffer == NULL) {
     return FALSE;
   }
 
   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
   if (error == 0) {
-    os::free(lpBuffer);
+    os::free(lpBuffer, mtInternal);
     return FALSE;
   }
 
@@ -4304,7 +4320,7 @@
   }
 
   if(lpBuffer != NULL) {
-    os::free(lpBuffer);
+    os::free(lpBuffer, mtInternal);
   }
 
   *pbytes = (long) actualLength;
@@ -4312,7 +4328,7 @@
 }
 
 // Map a block of memory.
-char* os::map_memory(int fd, const char* file_name, size_t file_offset,
+char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                      char *addr, size_t bytes, bool read_only,
                      bool allow_exec) {
   HANDLE hFile;
@@ -4432,7 +4448,7 @@
 
 
 // Remap a block of memory.
-char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
+char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
   // This OS does not allow existing memory maps to be remapped so we
@@ -4445,15 +4461,15 @@
   // call above and the map_memory() call below where a thread in native
   // code may be able to access an address that is no longer mapped.
 
-  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
-                        allow_exec);
+  return os::map_memory(fd, file_name, file_offset, addr, bytes,
+           read_only, allow_exec);
 }
 
 
 // Unmap a block of memory.
 // Returns true=success, otherwise false.
 
-bool os::unmap_memory(char* addr, size_t bytes) {
+bool os::pd_unmap_memory(char* addr, size_t bytes) {
   BOOL result = UnmapViewOfFile(addr);
   if (result == 0) {
     if (PrintMiscellaneous && Verbose) {
@@ -4931,11 +4947,15 @@
 typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
 typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
 typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
+typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);
 
 GetLargePageMinimum_Fn      os::Kernel32Dll::_GetLargePageMinimum = NULL;
 VirtualAllocExNuma_Fn       os::Kernel32Dll::_VirtualAllocExNuma = NULL;
 GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
 GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
+RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;
+
+
 BOOL                        os::Kernel32Dll::initialized = FALSE;
 SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
   assert(initialized && _GetLargePageMinimum != NULL,
@@ -4978,6 +4998,19 @@
   return _GetNumaNodeProcessorMask(node, proc_mask);
 }
 
+USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
+  ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
+    if (!initialized) {
+      initialize();
+    }
+
+    if (_RtlCaptureStackBackTrace != NULL) {
+      return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
+        BackTrace, BackTraceHash);
+    } else {
+      return 0;
+    }
+}
 
 void os::Kernel32Dll::initializeCommon() {
   if (!initialized) {
@@ -4987,6 +5020,7 @@
     _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
     _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
     _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
+    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
     initialized = TRUE;
   }
 }
@@ -5101,7 +5135,6 @@
 Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
 GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;
 
-
 void os::Kernel32Dll::initialize() {
   if (!initialized) {
     HMODULE handle = ::GetModuleHandle("Kernel32.dll");
@@ -5179,8 +5212,6 @@
   _GetNativeSystemInfo(lpSystemInfo);
 }
 
-
-
 // PSAPI API
 
 
--- a/hotspot/src/os/windows/vm/os_windows.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -98,7 +98,7 @@
   static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e);
 };
 
-class PlatformEvent : public CHeapObj {
+class PlatformEvent : public CHeapObj<mtInternal> {
   private:
     double CachePad [4] ;   // increase odds that _Event is sole occupant of cache line
     volatile int _Event ;
@@ -124,7 +124,7 @@
 
 
 
-class PlatformParker : public CHeapObj {
+class PlatformParker : public CHeapObj<mtInternal> {
   protected:
     HANDLE _ParkEvent ;
 
@@ -182,6 +182,9 @@
   static BOOL GetNumaHighestNodeNumber(PULONG);
   static BOOL GetNumaNodeProcessorMask(UCHAR, PULONGLONG);
 
+  // Stack walking
+  static USHORT RtlCaptureStackBackTrace(ULONG, ULONG, PVOID*, PULONG);
+
 private:
   // GetLargePageMinimum available on Windows Vista/Windows Server 2003
   // and later
@@ -191,6 +194,7 @@
   static LPVOID (WINAPI *_VirtualAllocExNuma) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
   static BOOL (WINAPI *_GetNumaHighestNodeNumber) (PULONG);
   static BOOL (WINAPI *_GetNumaNodeProcessorMask) (UCHAR, PULONGLONG);
+  static USHORT (WINAPI *_RtlCaptureStackBackTrace)(ULONG, ULONG, PVOID*, PULONG);
   static BOOL initialized;
 
   static void initialize();
--- a/hotspot/src/os/windows/vm/perfMemory_windows.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/os/windows/vm/perfMemory_windows.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -120,7 +120,7 @@
     }
   }
 
-  FREE_C_HEAP_ARRAY(char, destfile);
+  FREE_C_HEAP_ARRAY(char, destfile, mtInternal);
 }
 
 // Shared Memory Implementation Details
@@ -157,7 +157,7 @@
   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
   size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
-  char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* dirname = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
 
   // construct the path name to user specific tmp directory
   _snprintf(dirname, nbytes, "%s\\%s_%s", tmpdir, perfdir, user);
@@ -281,7 +281,7 @@
     }
   }
 
-  char* user_name = NEW_C_HEAP_ARRAY(char, strlen(user)+1);
+  char* user_name = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
   strcpy(user_name, user);
 
   return user_name;
@@ -315,7 +315,7 @@
   // to determine the user name for the process id.
   //
   struct dirent* dentry;
-  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname));
+  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal);
   errno = 0;
   while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) {
 
@@ -325,7 +325,7 @@
     }
 
     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
+        strlen(tmpdirname) + strlen(dentry->d_name) + 2, mtInternal);
     strcpy(usrdir_name, tmpdirname);
     strcat(usrdir_name, "\\");
     strcat(usrdir_name, dentry->d_name);
@@ -333,7 +333,7 @@
     DIR* subdirp = os::opendir(usrdir_name);
 
     if (subdirp == NULL) {
-      FREE_C_HEAP_ARRAY(char, usrdir_name);
+      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
       continue;
     }
 
@@ -344,13 +344,13 @@
     // symlink can be exploited.
     //
     if (!is_directory_secure(usrdir_name)) {
-      FREE_C_HEAP_ARRAY(char, usrdir_name);
+      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
       os::closedir(subdirp);
       continue;
     }
 
     struct dirent* udentry;
-    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name));
+    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal);
     errno = 0;
     while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) {
 
@@ -358,20 +358,20 @@
         struct stat statbuf;
 
         char* filename = NEW_C_HEAP_ARRAY(char,
-                            strlen(usrdir_name) + strlen(udentry->d_name) + 2);
+           strlen(usrdir_name) + strlen(udentry->d_name) + 2, mtInternal);
 
         strcpy(filename, usrdir_name);
         strcat(filename, "\\");
         strcat(filename, udentry->d_name);
 
         if (::stat(filename, &statbuf) == OS_ERR) {
-           FREE_C_HEAP_ARRAY(char, filename);
+           FREE_C_HEAP_ARRAY(char, filename, mtInternal);
            continue;
         }
 
         // skip over files that are not regular files.
         if ((statbuf.st_mode & S_IFMT) != S_IFREG) {
-          FREE_C_HEAP_ARRAY(char, filename);
+          FREE_C_HEAP_ARRAY(char, filename, mtInternal);
           continue;
         }
 
@@ -393,22 +393,22 @@
         if (statbuf.st_ctime > latest_ctime) {
           char* user = strchr(dentry->d_name, '_') + 1;
 
-          if (latest_user != NULL) FREE_C_HEAP_ARRAY(char, latest_user);
-          latest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1);
+          if (latest_user != NULL) FREE_C_HEAP_ARRAY(char, latest_user, mtInternal);
+          latest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
 
           strcpy(latest_user, user);
           latest_ctime = statbuf.st_ctime;
         }
 
-        FREE_C_HEAP_ARRAY(char, filename);
+        FREE_C_HEAP_ARRAY(char, filename, mtInternal);
       }
     }
     os::closedir(subdirp);
-    FREE_C_HEAP_ARRAY(char, udbuf);
-    FREE_C_HEAP_ARRAY(char, usrdir_name);
+    FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
+    FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
   }
   os::closedir(tmpdirp);
-  FREE_C_HEAP_ARRAY(char, tdbuf);
+  FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
 
   return(latest_user);
 }
@@ -453,7 +453,7 @@
   // about a name containing a '-' character.
   //
   nbytes += UINT_CHARS;
-  char* name = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
   _snprintf(name, nbytes, "%s_%s_%u", PERFDATA_NAME, user, vmid);
 
   return name;
@@ -469,7 +469,7 @@
   // add 2 for the file separator and a null terminator.
   size_t nbytes = strlen(dirname) + UINT_CHARS + 2;
 
-  char* name = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
   _snprintf(name, nbytes, "%s\\%d", dirname, vmid);
 
   return name;
@@ -485,7 +485,7 @@
 static void remove_file(const char* dirname, const char* filename) {
 
   size_t nbytes = strlen(dirname) + strlen(filename) + 2;
-  char* path = NEW_C_HEAP_ARRAY(char, nbytes);
+  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
 
   strcpy(path, dirname);
   strcat(path, "\\");
@@ -500,7 +500,7 @@
     }
   }
 
-  FREE_C_HEAP_ARRAY(char, path);
+  FREE_C_HEAP_ARRAY(char, path, mtInternal);
 }
 
 // returns true if the process represented by pid is alive, otherwise
@@ -638,7 +638,7 @@
   // opendir/readdir.
   //
   struct dirent* entry;
-  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname));
+  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
   errno = 0;
   while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
 
@@ -681,7 +681,7 @@
     errno = 0;
   }
   os::closedir(dirp);
-  FREE_C_HEAP_ARRAY(char, dbuf);
+  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
 }
 
 // create a file mapping object with the requested name, and size
@@ -747,11 +747,11 @@
     // be an ACL we enlisted. free the resources.
     //
     if (success && exists && pACL != NULL && !isdefault) {
-      FREE_C_HEAP_ARRAY(char, pACL);
+      FREE_C_HEAP_ARRAY(char, pACL, mtInternal);
     }
 
     // free the security descriptor
-    FREE_C_HEAP_ARRAY(char, pSD);
+    FREE_C_HEAP_ARRAY(char, pSD, mtInternal);
   }
 }
 
@@ -766,7 +766,7 @@
     lpSA->lpSecurityDescriptor = NULL;
 
     // free the security attributes structure
-    FREE_C_HEAP_ARRAY(char, lpSA);
+    FREE_C_HEAP_ARRAY(char, lpSA, mtInternal);
   }
 }
 
@@ -805,7 +805,7 @@
     }
   }
 
-  token_buf = (PTOKEN_USER) NEW_C_HEAP_ARRAY(char, rsize);
+  token_buf = (PTOKEN_USER) NEW_C_HEAP_ARRAY(char, rsize, mtInternal);
 
   // get the user token information
   if (!GetTokenInformation(hAccessToken, TokenUser, token_buf, rsize, &rsize)) {
@@ -813,28 +813,28 @@
       warning("GetTokenInformation failure: lasterror = %d,"
               " rsize = %d\n", GetLastError(), rsize);
     }
-    FREE_C_HEAP_ARRAY(char, token_buf);
+    FREE_C_HEAP_ARRAY(char, token_buf, mtInternal);
     CloseHandle(hAccessToken);
     return NULL;
   }
 
   DWORD nbytes = GetLengthSid(token_buf->User.Sid);
-  PSID pSID = NEW_C_HEAP_ARRAY(char, nbytes);
+  PSID pSID = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
 
   if (!CopySid(nbytes, pSID, token_buf->User.Sid)) {
     if (PrintMiscellaneous && Verbose) {
       warning("GetTokenInformation failure: lasterror = %d,"
               " rsize = %d\n", GetLastError(), rsize);
     }
-    FREE_C_HEAP_ARRAY(char, token_buf);
-    FREE_C_HEAP_ARRAY(char, pSID);
+    FREE_C_HEAP_ARRAY(char, token_buf, mtInternal);
+    FREE_C_HEAP_ARRAY(char, pSID, mtInternal);
     CloseHandle(hAccessToken);
     return NULL;
   }
 
   // close the access token.
   CloseHandle(hAccessToken);
-  FREE_C_HEAP_ARRAY(char, token_buf);
+  FREE_C_HEAP_ARRAY(char, token_buf, mtInternal);
 
   return pSID;
 }
@@ -912,13 +912,13 @@
   }
 
   // create the new ACL
-  newACL = (PACL) NEW_C_HEAP_ARRAY(char, newACLsize);
+  newACL = (PACL) NEW_C_HEAP_ARRAY(char, newACLsize, mtInternal);
 
   if (!InitializeAcl(newACL, newACLsize, ACL_REVISION)) {
     if (PrintMiscellaneous && Verbose) {
       warning("InitializeAcl failure: lasterror = %d \n", GetLastError());
     }
-    FREE_C_HEAP_ARRAY(char, newACL);
+    FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
     return false;
   }
 
@@ -931,7 +931,7 @@
         if (PrintMiscellaneous && Verbose) {
           warning("InitializeAcl failure: lasterror = %d \n", GetLastError());
         }
-        FREE_C_HEAP_ARRAY(char, newACL);
+        FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
         return false;
       }
       if (((ACCESS_ALLOWED_ACE *)ace)->Header.AceFlags && INHERITED_ACE) {
@@ -958,7 +958,7 @@
           if (PrintMiscellaneous && Verbose) {
             warning("AddAce failure: lasterror = %d \n", GetLastError());
           }
-          FREE_C_HEAP_ARRAY(char, newACL);
+          FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
           return false;
         }
       }
@@ -974,7 +974,7 @@
         warning("AddAccessAllowedAce failure: lasterror = %d \n",
                 GetLastError());
       }
-      FREE_C_HEAP_ARRAY(char, newACL);
+      FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
       return false;
     }
   }
@@ -989,7 +989,7 @@
         if (PrintMiscellaneous && Verbose) {
           warning("InitializeAcl failure: lasterror = %d \n", GetLastError());
         }
-        FREE_C_HEAP_ARRAY(char, newACL);
+        FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
         return false;
       }
       if (!AddAce(newACL, ACL_REVISION, MAXDWORD, ace,
@@ -997,7 +997,7 @@
         if (PrintMiscellaneous && Verbose) {
           warning("AddAce failure: lasterror = %d \n", GetLastError());
         }
-        FREE_C_HEAP_ARRAY(char, newACL);
+        FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
         return false;
       }
       ace_index++;
@@ -1010,7 +1010,7 @@
       warning("SetSecurityDescriptorDacl failure:"
               " lasterror = %d \n", GetLastError());
     }
-    FREE_C_HEAP_ARRAY(char, newACL);
+    FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
     return false;
   }
 
@@ -1030,7 +1030,7 @@
         warning("SetSecurityDescriptorControl failure:"
                 " lasterror = %d \n", GetLastError());
       }
-      FREE_C_HEAP_ARRAY(char, newACL);
+      FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
       return false;
     }
   }
@@ -1054,7 +1054,7 @@
 
   // allocate space for a security descriptor
   PSECURITY_DESCRIPTOR pSD = (PSECURITY_DESCRIPTOR)
-                         NEW_C_HEAP_ARRAY(char, SECURITY_DESCRIPTOR_MIN_LENGTH);
+     NEW_C_HEAP_ARRAY(char, SECURITY_DESCRIPTOR_MIN_LENGTH, mtInternal);
 
   // initialize the security descriptor
   if (!InitializeSecurityDescriptor(pSD, SECURITY_DESCRIPTOR_REVISION)) {
@@ -1076,7 +1076,7 @@
   // return it to the caller.
   //
   LPSECURITY_ATTRIBUTES lpSA = (LPSECURITY_ATTRIBUTES)
-                            NEW_C_HEAP_ARRAY(char, sizeof(SECURITY_ATTRIBUTES));
+    NEW_C_HEAP_ARRAY(char, sizeof(SECURITY_ATTRIBUTES), mtInternal);
   lpSA->nLength = sizeof(SECURITY_ATTRIBUTES);
   lpSA->lpSecurityDescriptor = pSD;
   lpSA->bInheritHandle = FALSE;
@@ -1147,7 +1147,7 @@
   // create a security attributes structure with access control
   // entries as initialized above.
   LPSECURITY_ATTRIBUTES lpSA = make_security_attr(aces, 3);
-  FREE_C_HEAP_ARRAY(char, aces[0].pSid);
+  FREE_C_HEAP_ARRAY(char, aces[0].pSid, mtInternal);
   FreeSid(everybodySid);
   FreeSid(administratorsSid);
   return(lpSA);
@@ -1462,15 +1462,15 @@
   assert(((size != 0) && (size % os::vm_page_size() == 0)),
          "unexpected PerfMemry region size");
 
-  FREE_C_HEAP_ARRAY(char, user);
+  FREE_C_HEAP_ARRAY(char, user, mtInternal);
 
   // create the shared memory resources
   sharedmem_fileMapHandle =
                create_sharedmem_resources(dirname, filename, objectname, size);
 
-  FREE_C_HEAP_ARRAY(char, filename);
-  FREE_C_HEAP_ARRAY(char, objectname);
-  FREE_C_HEAP_ARRAY(char, dirname);
+  FREE_C_HEAP_ARRAY(char, filename, mtInternal);
+  FREE_C_HEAP_ARRAY(char, objectname, mtInternal);
+  FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
 
   if (sharedmem_fileMapHandle == NULL) {
     return NULL;
@@ -1621,7 +1621,7 @@
   // store file, we also don't follow them when attaching
   //
   if (!is_directory_secure(dirname)) {
-    FREE_C_HEAP_ARRAY(char, dirname);
+    FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "Process not found");
   }
@@ -1640,10 +1640,10 @@
   strcpy(robjectname, objectname);
 
   // free the c heap resources that are no longer needed
-  if (luser != user) FREE_C_HEAP_ARRAY(char, luser);
-  FREE_C_HEAP_ARRAY(char, dirname);
-  FREE_C_HEAP_ARRAY(char, filename);
-  FREE_C_HEAP_ARRAY(char, objectname);
+  if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal);
+  FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+  FREE_C_HEAP_ARRAY(char, filename, mtInternal);
+  FREE_C_HEAP_ARRAY(char, objectname, mtInternal);
 
   if (*sizep == 0) {
     size = sharedmem_filesize(rfilename, CHECK);
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -261,7 +261,7 @@
 
 GrowableArray<int>* CodeBuffer::create_patch_overflow() {
   if (_overflow_arena == NULL) {
-    _overflow_arena = new Arena();
+    _overflow_arena = new (mtCode) Arena();
   }
   return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
 }
@@ -910,7 +910,7 @@
   _comments.add_comment(offset, comment);
 }
 
-class CodeComment: public CHeapObj {
+class CodeComment: public CHeapObj<mtCode> {
  private:
   friend class CodeComments;
   intptr_t     _offset;
@@ -919,13 +919,13 @@
 
   ~CodeComment() {
     assert(_next == NULL, "wrong interface for freeing list");
-    os::free((void*)_comment);
+    os::free((void*)_comment, mtCode);
   }
 
  public:
   CodeComment(intptr_t offset, const char * comment) {
     _offset = offset;
-    _comment = os::strdup(comment);
+    _comment = os::strdup(comment, mtCode);
     _next = NULL;
   }
 
--- a/hotspot/src/share/vm/c1/c1_CFGPrinter.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/c1/c1_CFGPrinter.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -33,7 +33,7 @@
 #ifndef PRODUCT
 
 
-class CFGPrinterOutput : public CHeapObj {
+class CFGPrinterOutput : public CHeapObj<mtCompiler> {
  private:
   outputStream* _output;
 
@@ -106,7 +106,7 @@
 
 
 CFGPrinterOutput::CFGPrinterOutput()
- : _output(new(ResourceObj::C_HEAP) fileStream("output.cfg"))
+ : _output(new(ResourceObj::C_HEAP, mtCompiler) fileStream("output.cfg"))
 {
 }
 
--- a/hotspot/src/share/vm/c1/c1_Compiler.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compiler.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -55,7 +55,7 @@
 
 void Compiler::initialize_all() {
   BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
-  Arena* arena = new Arena();
+  Arena* arena = new (mtCompiler) Arena();
   Runtime1::initialize(buffer_blob);
   FrameMap::initialize();
   // initialize data structures
--- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -2467,12 +2467,12 @@
 // Allocate them with new so they are never destroyed (otherwise, a
 // forced exit could destroy these objects while they are still in
 // use).
-ConstantOopWriteValue* LinearScan::_oop_null_scope_value = new (ResourceObj::C_HEAP) ConstantOopWriteValue(NULL);
-ConstantIntValue*      LinearScan::_int_m1_scope_value = new (ResourceObj::C_HEAP) ConstantIntValue(-1);
-ConstantIntValue*      LinearScan::_int_0_scope_value =  new (ResourceObj::C_HEAP) ConstantIntValue(0);
-ConstantIntValue*      LinearScan::_int_1_scope_value =  new (ResourceObj::C_HEAP) ConstantIntValue(1);
-ConstantIntValue*      LinearScan::_int_2_scope_value =  new (ResourceObj::C_HEAP) ConstantIntValue(2);
-LocationValue*         _illegal_value = new (ResourceObj::C_HEAP) LocationValue(Location());
+ConstantOopWriteValue* LinearScan::_oop_null_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantOopWriteValue(NULL);
+ConstantIntValue*      LinearScan::_int_m1_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(-1);
+ConstantIntValue*      LinearScan::_int_0_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(0);
+ConstantIntValue*      LinearScan::_int_1_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(1);
+ConstantIntValue*      LinearScan::_int_2_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(2);
+LocationValue*         _illegal_value = new (ResourceObj::C_HEAP, mtCompiler) LocationValue(Location());
 
 void LinearScan::init_compute_debug_info() {
   // cache for frequently used scope values
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -111,7 +111,7 @@
   // This Arena is long lived and exists in the resource mark of the
   // compiler thread that initializes the initial ciObjectFactory which
   // creates the shared ciObjects that all later ciObjectFactories use.
-  Arena* arena = new Arena();
+  Arena* arena = new (mtCompiler) Arena();
   ciEnv initial(arena);
   ciEnv* env = ciEnv::current();
   env->_factory->init_shared_objects();
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -1368,7 +1368,7 @@
 };
 
 
-class LVT_Hash: public CHeapObj {
+class LVT_Hash: public CHeapObj<mtClass> {
  public:
   LocalVariableTableElement  *_elem;  // element
   LVT_Hash*                   _next;  // Next entry in hash table
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -153,7 +153,7 @@
     _meta_package_names = NULL;
     _num_meta_package_names = 0;
   } else {
-    _meta_package_names = NEW_C_HEAP_ARRAY(char*, num_meta_package_names);
+    _meta_package_names = NEW_C_HEAP_ARRAY(char*, num_meta_package_names, mtClass);
     _num_meta_package_names = num_meta_package_names;
     memcpy(_meta_package_names, meta_package_names, num_meta_package_names * sizeof(char*));
   }
@@ -161,7 +161,7 @@
 
 
 MetaIndex::~MetaIndex() {
-  FREE_C_HEAP_ARRAY(char*, _meta_package_names);
+  FREE_C_HEAP_ARRAY(char*, _meta_package_names, mtClass);
 }
 
 
@@ -192,7 +192,7 @@
 }
 
 ClassPathDirEntry::ClassPathDirEntry(char* dir) : ClassPathEntry() {
-  _dir = NEW_C_HEAP_ARRAY(char, strlen(dir)+1);
+  _dir = NEW_C_HEAP_ARRAY(char, strlen(dir)+1, mtClass);
   strcpy(_dir, dir);
 }
 
@@ -229,7 +229,7 @@
 
 ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name) : ClassPathEntry() {
   _zip = zip;
-  _zip_name = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1);
+  _zip_name = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass);
   strcpy(_zip_name, zip_name);
 }
 
@@ -237,7 +237,7 @@
   if (ZipClose != NULL) {
     (*ZipClose)(_zip);
   }
-  FREE_C_HEAP_ARRAY(char, _zip_name);
+  FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
 }
 
 ClassFileStream* ClassPathZipEntry::open_stream(const char* name) {
@@ -454,11 +454,11 @@
     while (sys_class_path[end] && sys_class_path[end] != os::path_separator()[0]) {
       end++;
     }
-    char* path = NEW_C_HEAP_ARRAY(char, end-start+1);
+    char* path = NEW_C_HEAP_ARRAY(char, end-start+1, mtClass);
     strncpy(path, &sys_class_path[start], end-start);
     path[end-start] = '\0';
     update_class_path_entry_list(path, false);
-    FREE_C_HEAP_ARRAY(char, path);
+    FREE_C_HEAP_ARRAY(char, path, mtClass);
     while (sys_class_path[end] == os::path_separator()[0]) {
       end++;
     }
@@ -652,13 +652,13 @@
 // in the classpath must be the same files, in the same order, even
 // though the exact name is not the same.
 
-class PackageInfo: public BasicHashtableEntry {
+class PackageInfo: public BasicHashtableEntry<mtClass> {
 public:
   const char* _pkgname;       // Package name
   int _classpath_index;       // Index of directory or JAR file loaded from
 
   PackageInfo* next() {
-    return (PackageInfo*)BasicHashtableEntry::next();
+    return (PackageInfo*)BasicHashtableEntry<mtClass>::next();
   }
 
   const char* pkgname()           { return _pkgname; }
@@ -674,7 +674,7 @@
 };
 
 
-class PackageHashtable : public BasicHashtable {
+class PackageHashtable : public BasicHashtable<mtClass> {
 private:
   inline unsigned int compute_hash(const char *s, int n) {
     unsigned int val = 0;
@@ -685,7 +685,7 @@
   }
 
   PackageInfo* bucket(int index) {
-    return (PackageInfo*)BasicHashtable::bucket(index);
+    return (PackageInfo*)BasicHashtable<mtClass>::bucket(index);
   }
 
   PackageInfo* get_entry(int index, unsigned int hash,
@@ -702,10 +702,10 @@
 
 public:
   PackageHashtable(int table_size)
-    : BasicHashtable(table_size, sizeof(PackageInfo)) {}
+    : BasicHashtable<mtClass>(table_size, sizeof(PackageInfo)) {}
 
-  PackageHashtable(int table_size, HashtableBucket* t, int number_of_entries)
-    : BasicHashtable(table_size, sizeof(PackageInfo), t, number_of_entries) {}
+  PackageHashtable(int table_size, HashtableBucket<mtClass>* t, int number_of_entries)
+    : BasicHashtable<mtClass>(table_size, sizeof(PackageInfo), t, number_of_entries) {}
 
   PackageInfo* get_entry(const char* pkgname, int n) {
     unsigned int hash = compute_hash(pkgname, n);
@@ -715,14 +715,14 @@
   PackageInfo* new_entry(char* pkgname, int n) {
     unsigned int hash = compute_hash(pkgname, n);
     PackageInfo* pp;
-    pp = (PackageInfo*)BasicHashtable::new_entry(hash);
+    pp = (PackageInfo*)BasicHashtable<mtClass>::new_entry(hash);
     pp->set_pkgname(pkgname);
     return pp;
   }
 
   void add_entry(PackageInfo* pp) {
     int index = hash_to_index(pp->hash());
-    BasicHashtable::add_entry(index, pp);
+    BasicHashtable<mtClass>::add_entry(index, pp);
   }
 
   void copy_pkgnames(const char** packages) {
@@ -742,7 +742,7 @@
 void PackageHashtable::copy_table(char** top, char* end,
                                   PackageHashtable* table) {
   // Copy (relocate) the table to the shared space.
-  BasicHashtable::copy_table(top, end);
+  BasicHashtable<mtClass>::copy_table(top, end);
 
   // Calculate the space needed for the package name strings.
   int i;
@@ -815,7 +815,7 @@
       // Package prefix found
       int n = cp - pkgname + 1;
 
-      char* new_pkgname = NEW_C_HEAP_ARRAY(char, n + 1);
+      char* new_pkgname = NEW_C_HEAP_ARRAY(char, n + 1, mtClass);
       if (new_pkgname == NULL) {
         return false;
       }
@@ -929,10 +929,10 @@
 }
 
 
-void ClassLoader::create_package_info_table(HashtableBucket *t, int length,
+void ClassLoader::create_package_info_table(HashtableBucket<mtClass> *t, int length,
                                             int number_of_entries) {
   assert(_package_hash_table == NULL, "One package info table allowed.");
-  assert(length == package_hash_table_size * sizeof(HashtableBucket),
+  assert(length == package_hash_table_size * sizeof(HashtableBucket<mtClass>),
          "bad shared package info size.");
   _package_hash_table = new PackageHashtable(package_hash_table_size, t,
                                              number_of_entries);
--- a/hotspot/src/share/vm/classfile/classLoader.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -33,7 +33,7 @@
 
 
 // Meta-index (optional, to be able to skip opening boot classpath jar files)
-class MetaIndex: public CHeapObj {
+class MetaIndex: public CHeapObj<mtClass> {
  private:
   char** _meta_package_names;
   int    _num_meta_package_names;
@@ -46,7 +46,7 @@
 
 // Class path entry (directory or zip file)
 
-class ClassPathEntry: public CHeapObj {
+class ClassPathEntry: public CHeapObj<mtClass> {
  private:
   ClassPathEntry* _next;
  public:
@@ -141,7 +141,7 @@
 
 class PackageHashtable;
 class PackageInfo;
-class HashtableBucket;
+template <MEMFLAGS F> class HashtableBucket;
 
 class ClassLoader: AllStatic {
  public:
@@ -299,7 +299,7 @@
   // Initialization
   static void initialize();
   static void create_package_info_table();
-  static void create_package_info_table(HashtableBucket *t, int length,
+  static void create_package_info_table(HashtableBucket<mtClass> *t, int length,
                                         int number_of_entries);
   static int compute_Object_vtable();
 
--- a/hotspot/src/share/vm/classfile/dictionary.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -36,16 +36,16 @@
 
 
 Dictionary::Dictionary(int table_size)
-  : TwoOopHashtable<klassOop>(table_size, sizeof(DictionaryEntry)) {
+  : TwoOopHashtable<klassOop, mtClass>(table_size, sizeof(DictionaryEntry)) {
   _current_class_index = 0;
   _current_class_entry = NULL;
 };
 
 
 
-Dictionary::Dictionary(int table_size, HashtableBucket* t,
+Dictionary::Dictionary(int table_size, HashtableBucket<mtClass>* t,
                        int number_of_entries)
-  : TwoOopHashtable<klassOop>(table_size, sizeof(DictionaryEntry), t, number_of_entries) {
+  : TwoOopHashtable<klassOop, mtClass>(table_size, sizeof(DictionaryEntry), t, number_of_entries) {
   _current_class_index = 0;
   _current_class_entry = NULL;
 };
@@ -54,7 +54,7 @@
 DictionaryEntry* Dictionary::new_entry(unsigned int hash, klassOop klass,
                                        oop loader) {
   DictionaryEntry* entry;
-  entry = (DictionaryEntry*)Hashtable<klassOop>::new_entry(hash, klass);
+  entry = (DictionaryEntry*)Hashtable<klassOop, mtClass>::new_entry(hash, klass);
   entry->set_loader(loader);
   entry->set_pd_set(NULL);
   return entry;
@@ -62,7 +62,7 @@
 
 
 DictionaryEntry* Dictionary::new_entry() {
-  DictionaryEntry* entry = (DictionaryEntry*)Hashtable<klassOop>::new_entry(0L, NULL);
+  DictionaryEntry* entry = (DictionaryEntry*)Hashtable<klassOop, mtClass>::new_entry(0L, NULL);
   entry->set_loader(NULL);
   entry->set_pd_set(NULL);
   return entry;
@@ -76,7 +76,7 @@
     entry->set_pd_set(to_delete->next());
     delete to_delete;
   }
-  Hashtable<klassOop>::free_entry(entry);
+  Hashtable<klassOop, mtClass>::free_entry(entry);
 }
 
 
@@ -554,12 +554,12 @@
 }
 
 SymbolPropertyTable::SymbolPropertyTable(int table_size)
-  : Hashtable<Symbol*>(table_size, sizeof(SymbolPropertyEntry))
+  : Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry))
 {
 }
-SymbolPropertyTable::SymbolPropertyTable(int table_size, HashtableBucket* t,
+SymbolPropertyTable::SymbolPropertyTable(int table_size, HashtableBucket<mtSymbol>* t,
                                          int number_of_entries)
-  : Hashtable<Symbol*>(table_size, sizeof(SymbolPropertyEntry), t, number_of_entries)
+  : Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry), t, number_of_entries)
 {
 }
 
@@ -584,7 +584,7 @@
   assert(find_entry(index, hash, sym, sym_mode) == NULL, "no double entry");
 
   SymbolPropertyEntry* p = new_entry(hash, sym, sym_mode);
-  Hashtable<Symbol*>::add_entry(index, p);
+  Hashtable<Symbol*, mtSymbol>::add_entry(index, p);
   return p;
 }
 
--- a/hotspot/src/share/vm/classfile/dictionary.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/dictionary.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -36,7 +36,7 @@
 // The data structure for the system dictionary (and the shared system
 // dictionary).
 
-class Dictionary : public TwoOopHashtable<klassOop> {
+class Dictionary : public TwoOopHashtable<klassOop, mtClass> {
   friend class VMStructs;
 private:
   // current iteration index.
@@ -48,22 +48,22 @@
                              Symbol* name, Handle loader);
 
   DictionaryEntry* bucket(int i) {
-    return (DictionaryEntry*)Hashtable<klassOop>::bucket(i);
+    return (DictionaryEntry*)Hashtable<klassOop, mtClass>::bucket(i);
   }
 
   // The following method is not MT-safe and must be done under lock.
   DictionaryEntry** bucket_addr(int i) {
-    return (DictionaryEntry**)Hashtable<klassOop>::bucket_addr(i);
+    return (DictionaryEntry**)Hashtable<klassOop, mtClass>::bucket_addr(i);
   }
 
   void add_entry(int index, DictionaryEntry* new_entry) {
-    Hashtable<klassOop>::add_entry(index, (HashtableEntry<oop>*)new_entry);
+    Hashtable<klassOop, mtClass>::add_entry(index, (HashtableEntry<oop, mtClass>*)new_entry);
   }
 
 
 public:
   Dictionary(int table_size);
-  Dictionary(int table_size, HashtableBucket* t, int number_of_entries);
+  Dictionary(int table_size, HashtableBucket<mtClass>* t, int number_of_entries);
 
   DictionaryEntry* new_entry(unsigned int hash, klassOop klass, oop loader);
 
@@ -129,7 +129,7 @@
 // The following classes can be in dictionary.cpp, but we need these
 // to be in header file so that SA's vmStructs can access.
 
-class ProtectionDomainEntry :public CHeapObj {
+class ProtectionDomainEntry :public CHeapObj<mtClass> {
   friend class VMStructs;
  public:
   ProtectionDomainEntry* _next;
@@ -147,7 +147,7 @@
 // An entry in the system dictionary, this describes a class as
 // { klassOop, loader, protection_domain }.
 
-class DictionaryEntry : public HashtableEntry<klassOop> {
+class DictionaryEntry : public HashtableEntry<klassOop, mtClass> {
   friend class VMStructs;
  private:
   // Contains the set of approved protection domains that can access
@@ -166,11 +166,11 @@
   klassOop* klass_addr() { return (klassOop*)literal_addr(); }
 
   DictionaryEntry* next() const {
-    return (DictionaryEntry*)HashtableEntry<klassOop>::next();
+    return (DictionaryEntry*)HashtableEntry<klassOop, mtClass>::next();
   }
 
   DictionaryEntry** next_addr() {
-    return (DictionaryEntry**)HashtableEntry<klassOop>::next_addr();
+    return (DictionaryEntry**)HashtableEntry<klassOop, mtClass>::next_addr();
   }
 
   oop loader() const { return _loader; }
@@ -228,7 +228,7 @@
 
 // Entry in a SymbolPropertyTable, mapping a single Symbol*
 // to a managed and an unmanaged pointer.
-class SymbolPropertyEntry : public HashtableEntry<Symbol*> {
+class SymbolPropertyEntry : public HashtableEntry<Symbol*, mtSymbol> {
   friend class VMStructs;
  private:
   intptr_t _symbol_mode;  // secondary key
@@ -248,11 +248,11 @@
   void set_property_data(address p) { _property_data = p; }
 
   SymbolPropertyEntry* next() const {
-    return (SymbolPropertyEntry*)HashtableEntry<Symbol*>::next();
+    return (SymbolPropertyEntry*)HashtableEntry<Symbol*, mtSymbol>::next();
   }
 
   SymbolPropertyEntry** next_addr() {
-    return (SymbolPropertyEntry**)HashtableEntry<Symbol*>::next_addr();
+    return (SymbolPropertyEntry**)HashtableEntry<Symbol*, mtSymbol>::next_addr();
   }
 
   oop* property_oop_addr()          { return &_property_oop; }
@@ -278,16 +278,16 @@
 // A system-internal mapping of symbols to pointers, both managed
 // and unmanaged.  Used to record the auto-generation of each method
 // MethodHandle.invoke(S)T, for all signatures (S)T.
-class SymbolPropertyTable : public Hashtable<Symbol*> {
+class SymbolPropertyTable : public Hashtable<Symbol*, mtSymbol> {
   friend class VMStructs;
 private:
   SymbolPropertyEntry* bucket(int i) {
-    return (SymbolPropertyEntry*) Hashtable<Symbol*>::bucket(i);
+    return (SymbolPropertyEntry*) Hashtable<Symbol*, mtSymbol>::bucket(i);
   }
 
   // The following method is not MT-safe and must be done under lock.
   SymbolPropertyEntry** bucket_addr(int i) {
-    return (SymbolPropertyEntry**) Hashtable<Symbol*>::bucket_addr(i);
+    return (SymbolPropertyEntry**) Hashtable<Symbol*, mtSymbol>::bucket_addr(i);
   }
 
   void add_entry(int index, SymbolPropertyEntry* new_entry) {
@@ -298,7 +298,7 @@
   }
 
   SymbolPropertyEntry* new_entry(unsigned int hash, Symbol* symbol, intptr_t symbol_mode) {
-    SymbolPropertyEntry* entry = (SymbolPropertyEntry*) Hashtable<Symbol*>::new_entry(hash, symbol);
+    SymbolPropertyEntry* entry = (SymbolPropertyEntry*) Hashtable<Symbol*, mtSymbol>::new_entry(hash, symbol);
     // Hashtable with Symbol* literal must increment and decrement refcount.
     symbol->increment_refcount();
     entry->set_symbol_mode(symbol_mode);
@@ -309,17 +309,17 @@
 
 public:
   SymbolPropertyTable(int table_size);
-  SymbolPropertyTable(int table_size, HashtableBucket* t, int number_of_entries);
+  SymbolPropertyTable(int table_size, HashtableBucket<mtSymbol>* t, int number_of_entries);
 
   void free_entry(SymbolPropertyEntry* entry) {
     // decrement Symbol refcount here because hashtable doesn't.
     entry->literal()->decrement_refcount();
-    Hashtable<Symbol*>::free_entry(entry);
+    Hashtable<Symbol*, mtSymbol>::free_entry(entry);
   }
 
   unsigned int compute_hash(Symbol* sym, intptr_t symbol_mode) {
     // Use the regular identity_hash.
-    return Hashtable<Symbol*>::compute_hash(sym) ^ symbol_mode;
+    return Hashtable<Symbol*, mtSymbol>::compute_hash(sym) ^ symbol_mode;
   }
 
   int index_for(Symbol* name, intptr_t symbol_mode) {
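
The dictionary.hpp changes above show the core pattern of this changeset: Hashtable, HashtableEntry and HashtableBucket now take a MEMFLAGS template argument, so every table names the NMT category its entries are charged against. A minimal sketch of a table written against the two-parameter templates follows; ExampleEntry and ExampleTable are illustrative names, not classes from this changeset.

    #include "utilities/hashtable.inline.hpp"

    // Illustrative only: entries and buckets of this table are charged to mtClass.
    class ExampleEntry : public HashtableEntry<Symbol*, mtClass> {
     public:
      ExampleEntry* next() const {
        return (ExampleEntry*)HashtableEntry<Symbol*, mtClass>::next();
      }
    };

    class ExampleTable : public Hashtable<Symbol*, mtClass> {
     public:
      ExampleTable(int table_size)
        : Hashtable<Symbol*, mtClass>(table_size, sizeof(ExampleEntry)) {}

      ExampleEntry* bucket(int i) {
        return (ExampleEntry*)Hashtable<Symbol*, mtClass>::bucket(i);
      }
    };

The downcasting in bucket() and next() mirrors what Dictionary, PlaceholderTable and the other tables in this changeset already do; only the extra MEMFLAGS argument is new.
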
--- a/hotspot/src/share/vm/classfile/javaAssertions.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/javaAssertions.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -58,7 +58,7 @@
   // it is never freed, so will be leaked (along with other option strings -
   // e.g., bootclasspath) if a process creates/destroys multiple VMs.
   int len = (int)strlen(name);
-  char *name_copy = NEW_C_HEAP_ARRAY(char, len + 1);
+  char *name_copy = NEW_C_HEAP_ARRAY(char, len + 1, mtClass);
   strcpy(name_copy, name);
 
   // Figure out which list the new item should go on.  Names that end in "..."
--- a/hotspot/src/share/vm/classfile/javaAssertions.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/javaAssertions.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -68,7 +68,7 @@
   static OptionList*    _packages;      // Options for package trees.
 };
 
-class JavaAssertions::OptionList: public CHeapObj {
+class JavaAssertions::OptionList: public CHeapObj<mtClass> {
 public:
   inline OptionList(const char* name, bool enable, OptionList* next);
 
--- a/hotspot/src/share/vm/classfile/loaderConstraints.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/loaderConstraints.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -31,7 +31,7 @@
 #include "utilities/hashtable.inline.hpp"
 
 LoaderConstraintTable::LoaderConstraintTable(int nof_buckets)
-  : Hashtable<klassOop>(nof_buckets, sizeof(LoaderConstraintEntry)) {};
+  : Hashtable<klassOop, mtClass>(nof_buckets, sizeof(LoaderConstraintEntry)) {};
 
 
 LoaderConstraintEntry* LoaderConstraintTable::new_entry(
@@ -39,7 +39,7 @@
                                  klassOop klass, int num_loaders,
                                  int max_loaders) {
   LoaderConstraintEntry* entry;
-  entry = (LoaderConstraintEntry*)Hashtable<klassOop>::new_entry(hash, klass);
+  entry = (LoaderConstraintEntry*)Hashtable<klassOop, mtClass>::new_entry(hash, klass);
   entry->set_name(name);
   entry->set_num_loaders(num_loaders);
   entry->set_max_loaders(max_loaders);
@@ -49,7 +49,7 @@
 void LoaderConstraintTable::free_entry(LoaderConstraintEntry *entry) {
   // decrement name refcount before freeing
   entry->name()->decrement_refcount();
-  Hashtable<klassOop>::free_entry(entry);
+  Hashtable<klassOop, mtClass>::free_entry(entry);
 }
 
 
@@ -164,7 +164,7 @@
 
         // Purge entry
         *p = probe->next();
-        FREE_C_HEAP_ARRAY(oop, probe->loaders());
+        FREE_C_HEAP_ARRAY(oop, probe->loaders(), mtClass);
         free_entry(probe);
       } else {
 #ifdef ASSERT
@@ -224,7 +224,7 @@
         int index = hash_to_index(hash);
         LoaderConstraintEntry* p;
         p = new_entry(hash, class_name, klass, 2, 2);
-        p->set_loaders(NEW_C_HEAP_ARRAY(oop, 2));
+        p->set_loaders(NEW_C_HEAP_ARRAY(oop, 2, mtClass));
         p->set_loader(0, class_loader1());
         p->set_loader(1, class_loader2());
         p->set_klass(klass);
@@ -340,10 +340,10 @@
                                                     int nfree) {
     if (p->max_loaders() - p->num_loaders() < nfree) {
         int n = nfree + p->num_loaders();
-        oop* new_loaders = NEW_C_HEAP_ARRAY(oop, n);
+        oop* new_loaders = NEW_C_HEAP_ARRAY(oop, n, mtClass);
         memcpy(new_loaders, p->loaders(), sizeof(oop) * p->num_loaders());
         p->set_max_loaders(n);
-        FREE_C_HEAP_ARRAY(oop, p->loaders());
+        FREE_C_HEAP_ARRAY(oop, p->loaders(), mtClass);
         p->set_loaders(new_loaders);
     }
 }
@@ -425,7 +425,7 @@
   }
 
   *pp2 = p2->next();
-  FREE_C_HEAP_ARRAY(oop, p2->loaders());
+  FREE_C_HEAP_ARRAY(oop, p2->loaders(), mtClass);
   free_entry(p2);
   return;
 }
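
The loaderConstraints.cpp hunks also show the revised C-heap array macros: NEW_C_HEAP_ARRAY and FREE_C_HEAP_ARRAY now take the memory category as a trailing argument, so both the allocation and the matching free are attributed to the same NMT bucket. A hedged sketch of the grow-and-replace idiom used above (grow_loader_array is an illustrative name, not a function in this changeset):

    // Illustrative only: grow an oop array whose storage is tracked under mtClass.
    static oop* grow_loader_array(oop* old_array, int old_len, int new_len) {
      oop* new_array = NEW_C_HEAP_ARRAY(oop, new_len, mtClass);
      memcpy(new_array, old_array, sizeof(oop) * old_len);
      FREE_C_HEAP_ARRAY(oop, old_array, mtClass);
      return new_array;
    }
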
--- a/hotspot/src/share/vm/classfile/loaderConstraints.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/loaderConstraints.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -31,7 +31,7 @@
 
 class LoaderConstraintEntry;
 
-class LoaderConstraintTable : public Hashtable<klassOop> {
+class LoaderConstraintTable : public Hashtable<klassOop, mtClass> {
   friend class VMStructs;
 private:
 
@@ -53,11 +53,11 @@
   void free_entry(LoaderConstraintEntry *entry);
 
   LoaderConstraintEntry* bucket(int i) {
-    return (LoaderConstraintEntry*)Hashtable<klassOop>::bucket(i);
+    return (LoaderConstraintEntry*)Hashtable<klassOop, mtClass>::bucket(i);
   }
 
   LoaderConstraintEntry** bucket_addr(int i) {
-    return (LoaderConstraintEntry**)Hashtable<klassOop>::bucket_addr(i);
+    return (LoaderConstraintEntry**)Hashtable<klassOop, mtClass>::bucket_addr(i);
   }
 
   // GC support
@@ -94,7 +94,7 @@
 #endif
 };
 
-class LoaderConstraintEntry : public HashtableEntry<klassOop> {
+class LoaderConstraintEntry : public HashtableEntry<klassOop, mtClass> {
   friend class VMStructs;
 private:
   Symbol*                _name;                   // class name
@@ -109,14 +109,14 @@
   void set_klass(klassOop k) { set_literal(k); }
 
   LoaderConstraintEntry* next() {
-    return (LoaderConstraintEntry*)HashtableEntry<klassOop>::next();
+    return (LoaderConstraintEntry*)HashtableEntry<klassOop, mtClass>::next();
   }
 
   LoaderConstraintEntry** next_addr() {
-    return (LoaderConstraintEntry**)HashtableEntry<klassOop>::next_addr();
+    return (LoaderConstraintEntry**)HashtableEntry<klassOop, mtClass>::next_addr();
   }
   void set_next(LoaderConstraintEntry* next) {
-    HashtableEntry<klassOop>::set_next(next);
+    HashtableEntry<klassOop, mtClass>::set_next(next);
   }
 
   Symbol* name() { return _name; }
--- a/hotspot/src/share/vm/classfile/placeholders.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/placeholders.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -34,7 +34,7 @@
 PlaceholderEntry* PlaceholderTable::new_entry(int hash, Symbol* name,
                                               oop loader, bool havesupername,
                                               Symbol* supername) {
-  PlaceholderEntry* entry = (PlaceholderEntry*)Hashtable<Symbol*>::new_entry(hash, name);
+  PlaceholderEntry* entry = (PlaceholderEntry*)Hashtable<Symbol*, mtClass>::new_entry(hash, name);
   // Hashtable with Symbol* literal must increment and decrement refcount.
   name->increment_refcount();
   entry->set_loader(loader);
@@ -52,7 +52,7 @@
   // decrement Symbol refcount here because Hashtable doesn't.
   entry->literal()->decrement_refcount();
   if (entry->supername() != NULL) entry->supername()->decrement_refcount();
-  Hashtable<Symbol*>::free_entry(entry);
+  Hashtable<Symbol*, mtClass>::free_entry(entry);
 }
 
 
@@ -166,7 +166,7 @@
   }
 
 PlaceholderTable::PlaceholderTable(int table_size)
-    : TwoOopHashtable<Symbol*>(table_size, sizeof(PlaceholderEntry)) {
+    : TwoOopHashtable<Symbol*, mtClass>(table_size, sizeof(PlaceholderEntry)) {
 }
 
 
--- a/hotspot/src/share/vm/classfile/placeholders.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/placeholders.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -34,7 +34,7 @@
 // being loaded, as well as arrays of primitives.
 //
 
-class PlaceholderTable : public TwoOopHashtable<Symbol*> {
+class PlaceholderTable : public TwoOopHashtable<Symbol*, mtClass> {
   friend class VMStructs;
 
 public:
@@ -44,15 +44,15 @@
   void free_entry(PlaceholderEntry* entry);
 
   PlaceholderEntry* bucket(int i) {
-    return (PlaceholderEntry*)Hashtable<Symbol*>::bucket(i);
+    return (PlaceholderEntry*)Hashtable<Symbol*, mtClass>::bucket(i);
   }
 
   PlaceholderEntry** bucket_addr(int i) {
-    return (PlaceholderEntry**)Hashtable<Symbol*>::bucket_addr(i);
+    return (PlaceholderEntry**)Hashtable<Symbol*, mtClass>::bucket_addr(i);
   }
 
   void add_entry(int index, PlaceholderEntry* new_entry) {
-    Hashtable<Symbol*>::add_entry(index, (HashtableEntry<Symbol*>*)new_entry);
+    Hashtable<Symbol*, mtClass>::add_entry(index, (HashtableEntry<Symbol*, mtClass>*)new_entry);
   }
 
   void add_entry(int index, unsigned int hash, Symbol* name,
@@ -116,7 +116,7 @@
 // For DEFINE_CLASS, the head of the queue owns the
 // define token and the rest of the threads wait to return the
 // result the first thread gets.
-class SeenThread: public CHeapObj {
+class SeenThread: public CHeapObj<mtInternal> {
 private:
    Thread *_thread;
    SeenThread* _stnext;
@@ -152,7 +152,7 @@
 // on store ordering here.
 // The system dictionary is the only user of this class.
 
-class PlaceholderEntry : public HashtableEntry<Symbol*> {
+class PlaceholderEntry : public HashtableEntry<Symbol*, mtClass> {
   friend class VMStructs;
 
 
@@ -206,11 +206,11 @@
   void               set_defineThreadQ(SeenThread* SeenThread) { _defineThreadQ = SeenThread; }
 
   PlaceholderEntry* next() const {
-    return (PlaceholderEntry*)HashtableEntry<Symbol*>::next();
+    return (PlaceholderEntry*)HashtableEntry<Symbol*, mtClass>::next();
   }
 
   PlaceholderEntry** next_addr() {
-    return (PlaceholderEntry**)HashtableEntry<Symbol*>::next_addr();
+    return (PlaceholderEntry**)HashtableEntry<Symbol*, mtClass>::next_addr();
   }
 
   // Test for equality
--- a/hotspot/src/share/vm/classfile/resolutionErrors.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/resolutionErrors.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -67,7 +67,7 @@
 ResolutionErrorEntry* ResolutionErrorTable::new_entry(int hash, constantPoolOop pool,
                                                       int cp_index, Symbol* error)
 {
-  ResolutionErrorEntry* entry = (ResolutionErrorEntry*)Hashtable<constantPoolOop>::new_entry(hash, pool);
+  ResolutionErrorEntry* entry = (ResolutionErrorEntry*)Hashtable<constantPoolOop, mtClass>::new_entry(hash, pool);
   entry->set_cp_index(cp_index);
   NOT_PRODUCT(entry->set_error(NULL);)
   entry->set_error(error);
@@ -79,13 +79,13 @@
   // decrement error refcount
   assert(entry->error() != NULL, "error should be set");
   entry->error()->decrement_refcount();
-  Hashtable<constantPoolOop>::free_entry(entry);
+  Hashtable<constantPoolOop, mtClass>::free_entry(entry);
 }
 
 
 // create resolution error table
 ResolutionErrorTable::ResolutionErrorTable(int table_size)
-    : Hashtable<constantPoolOop>(table_size, sizeof(ResolutionErrorEntry)) {
+    : Hashtable<constantPoolOop, mtClass>(table_size, sizeof(ResolutionErrorEntry)) {
 }
 
 // GC support
--- a/hotspot/src/share/vm/classfile/resolutionErrors.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/resolutionErrors.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -33,7 +33,7 @@
 // ResolutionError objects are used to record errors encountered during
 // constant pool resolution (JVMS 5.4.3).
 
-class ResolutionErrorTable : public Hashtable<constantPoolOop> {
+class ResolutionErrorTable : public Hashtable<constantPoolOop, mtClass> {
 
 public:
   ResolutionErrorTable(int table_size);
@@ -42,15 +42,16 @@
   void free_entry(ResolutionErrorEntry *entry);
 
   ResolutionErrorEntry* bucket(int i) {
-    return (ResolutionErrorEntry*)Hashtable<constantPoolOop>::bucket(i);
+    return (ResolutionErrorEntry*)Hashtable<constantPoolOop, mtClass>::bucket(i);
   }
 
   ResolutionErrorEntry** bucket_addr(int i) {
-    return (ResolutionErrorEntry**)Hashtable<constantPoolOop>::bucket_addr(i);
+    return (ResolutionErrorEntry**)Hashtable<constantPoolOop, mtClass>::bucket_addr(i);
   }
 
   void add_entry(int index, ResolutionErrorEntry* new_entry) {
-    Hashtable<constantPoolOop>::add_entry(index, (HashtableEntry<constantPoolOop>*)new_entry);
+    Hashtable<constantPoolOop, mtClass>::add_entry(index,
+      (HashtableEntry<constantPoolOop, mtClass>*)new_entry);
   }
 
   void add_entry(int index, unsigned int hash,
@@ -74,7 +75,7 @@
 };
 
 
-class ResolutionErrorEntry : public HashtableEntry<constantPoolOop> {
+class ResolutionErrorEntry : public HashtableEntry<constantPoolOop, mtClass> {
  private:
   int               _cp_index;
   Symbol*           _error;
@@ -90,11 +91,11 @@
   void               set_error(Symbol* e);
 
   ResolutionErrorEntry* next() const {
-    return (ResolutionErrorEntry*)HashtableEntry<constantPoolOop>::next();
+    return (ResolutionErrorEntry*)HashtableEntry<constantPoolOop, mtClass>::next();
   }
 
   ResolutionErrorEntry** next_addr() {
-    return (ResolutionErrorEntry**)HashtableEntry<constantPoolOop>::next_addr();
+    return (ResolutionErrorEntry**)HashtableEntry<constantPoolOop, mtClass>::next_addr();
   }
 
   // GC support
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -64,9 +64,9 @@
 void SymbolTable::initialize_symbols(int arena_alloc_size) {
   // Initialize the arena for global symbols, size passed in depends on CDS.
   if (arena_alloc_size == 0) {
-    _arena = new Arena();
+    _arena = new (mtSymbol) Arena();
   } else {
-    _arena = new Arena(arena_alloc_size);
+    _arena = new (mtSymbol) Arena(arena_alloc_size);
   }
 }
 
@@ -74,7 +74,7 @@
 void SymbolTable::symbols_do(SymbolClosure *cl) {
   const int n = the_table()->table_size();
   for (int i = 0; i < n; i++) {
-    for (HashtableEntry<Symbol*>* p = the_table()->bucket(i);
+    for (HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
          p != NULL;
          p = p->next()) {
       cl->do_symbol(p->literal_addr());
@@ -92,8 +92,8 @@
   int total = 0;
   size_t memory_total = 0;
   for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<Symbol*>** p = the_table()->bucket_addr(i);
-    HashtableEntry<Symbol*>* entry = the_table()->bucket(i);
+    HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i);
+    HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i);
     while (entry != NULL) {
       // Shared entries are normally at the end of the bucket and if we run into
       // a shared entry, then there is nothing more to remove. However, if we
@@ -117,7 +117,7 @@
         p = entry->next_addr();
       }
       // get next entry
-      entry = (HashtableEntry<Symbol*>*)HashtableEntry<Symbol*>::make_ptr(*p);
+      entry = (HashtableEntry<Symbol*, mtSymbol>*)HashtableEntry<Symbol*, mtSymbol>::make_ptr(*p);
     }
   }
   symbols_removed += removed;
@@ -164,7 +164,7 @@
 Symbol* SymbolTable::lookup(int index, const char* name,
                               int len, unsigned int hash) {
   int count = 0;
-  for (HashtableEntry<Symbol*>* e = bucket(index); e != NULL; e = e->next()) {
+  for (HashtableEntry<Symbol*, mtSymbol>* e = bucket(index); e != NULL; e = e->next()) {
     count++;  // count all entries in this bucket, not just ones with same hash
     if (e->hash() == hash) {
       Symbol* sym = e->literal();
@@ -176,7 +176,7 @@
     }
   }
   // If the bucket size is too deep check if this hash code is insufficient.
-  if (count >= BasicHashtable::rehash_count && !needs_rehashing()) {
+  if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) {
     _needs_rehashing = check_rehash_table(count);
   }
   return NULL;
@@ -268,7 +268,7 @@
   unsigned int hash = hash_symbol((char*)sym->bytes(), sym->utf8_length());
   int index = the_table()->hash_to_index(hash);
 
-  for (HashtableEntry<Symbol*>* e = the_table()->bucket(index); e != NULL; e = e->next()) {
+  for (HashtableEntry<Symbol*, mtSymbol>* e = the_table()->bucket(index); e != NULL; e = e->next()) {
     if (e->hash() == hash) {
       Symbol* literal_sym = e->literal();
       if (sym == literal_sym) {
@@ -387,7 +387,7 @@
   Symbol* sym = allocate_symbol(name, len, c_heap, CHECK_NULL);
   assert(sym->equals((char*)name, len), "symbol must be properly initialized");
 
-  HashtableEntry<Symbol*>* entry = new_entry(hashValue, sym);
+  HashtableEntry<Symbol*, mtSymbol>* entry = new_entry(hashValue, sym);
   add_entry(index, entry);
   return sym;
 }
@@ -435,7 +435,7 @@
       bool c_heap = class_loader() != NULL;
       Symbol* sym = allocate_symbol((const u1*)names[i], lengths[i], c_heap, CHECK_(false));
       assert(sym->equals(names[i], lengths[i]), "symbol must be properly initialized");  // why wouldn't it be???
-      HashtableEntry<Symbol*>* entry = new_entry(hashValue, sym);
+      HashtableEntry<Symbol*, mtSymbol>* entry = new_entry(hashValue, sym);
       add_entry(index, entry);
       cp->symbol_at_put(cp_indices[i], sym);
     }
@@ -446,7 +446,7 @@
 
 void SymbolTable::verify() {
   for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<Symbol*>* p = the_table()->bucket(i);
+    HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
     for ( ; p != NULL; p = p->next()) {
       Symbol* s = (Symbol*)(p->literal());
       guarantee(s != NULL, "symbol is NULL");
@@ -462,7 +462,7 @@
   NumberSeq summary;
   for (int i = 0; i < the_table()->table_size(); ++i) {
     int count = 0;
-    for (HashtableEntry<Symbol*>* e = the_table()->bucket(i);
+    for (HashtableEntry<Symbol*, mtSymbol>* e = the_table()->bucket(i);
        e != NULL; e = e->next()) {
       count++;
     }
@@ -499,7 +499,7 @@
   int memory_total = 0;
   int count = 0;
   for (i = 0; i < the_table()->table_size(); i++) {
-    HashtableEntry<Symbol*>* p = the_table()->bucket(i);
+    HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
     for ( ; p != NULL; p = p->next()) {
       memory_total += p->literal()->object_size();
       count++;
@@ -560,15 +560,15 @@
 
 void SymbolTable::print() {
   for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<Symbol*>** p = the_table()->bucket_addr(i);
-    HashtableEntry<Symbol*>* entry = the_table()->bucket(i);
+    HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i);
+    HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i);
     if (entry != NULL) {
       while (entry != NULL) {
         tty->print(PTR_FORMAT " ", entry->literal());
         entry->literal()->print();
         tty->print(" %d", entry->literal()->refcount());
         p = entry->next_addr();
-        entry = (HashtableEntry<Symbol*>*)HashtableEntry<Symbol*>::make_ptr(*p);
+        entry = (HashtableEntry<Symbol*, mtSymbol>*)HashtableEntry<Symbol*, mtSymbol>::make_ptr(*p);
       }
       tty->cr();
     }
@@ -631,7 +631,7 @@
 oop StringTable::lookup(int index, jchar* name,
                         int len, unsigned int hash) {
   int count = 0;
-  for (HashtableEntry<oop>* l = bucket(index); l != NULL; l = l->next()) {
+  for (HashtableEntry<oop, mtSymbol>* l = bucket(index); l != NULL; l = l->next()) {
     count++;
     if (l->hash() == hash) {
       if (java_lang_String::equals(l->literal(), name, len)) {
@@ -640,7 +640,7 @@
     }
   }
   // If the bucket size is too deep check if this hash code is insufficient.
-  if (count >= BasicHashtable::rehash_count && !needs_rehashing()) {
+  if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) {
     _needs_rehashing = check_rehash_table(count);
   }
   return NULL;
@@ -676,7 +676,7 @@
     return test;
   }
 
-  HashtableEntry<oop>* entry = new_entry(hashValue, string());
+  HashtableEntry<oop, mtSymbol>* entry = new_entry(hashValue, string());
   add_entry(index, entry);
   return string();
 }
@@ -761,8 +761,8 @@
   // entries at a safepoint.
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<oop>** p = the_table()->bucket_addr(i);
-    HashtableEntry<oop>* entry = the_table()->bucket(i);
+    HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
+    HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
     while (entry != NULL) {
       // Shared entries are normally at the end of the bucket and if we run into
       // a shared entry, then there is nothing more to remove. However, if we
@@ -778,15 +778,15 @@
         *p = entry->next();
         the_table()->free_entry(entry);
       }
-      entry = (HashtableEntry<oop>*)HashtableEntry<oop>::make_ptr(*p);
+      entry = (HashtableEntry<oop, mtSymbol>*)HashtableEntry<oop, mtSymbol>::make_ptr(*p);
     }
   }
 }
 
 void StringTable::oops_do(OopClosure* f) {
   for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<oop>** p = the_table()->bucket_addr(i);
-    HashtableEntry<oop>* entry = the_table()->bucket(i);
+    HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
+    HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
     while (entry != NULL) {
       f->do_oop((oop*)entry->literal_addr());
 
@@ -798,14 +798,14 @@
       } else {
         p = entry->next_addr();
       }
-      entry = (HashtableEntry<oop>*)HashtableEntry<oop>::make_ptr(*p);
+      entry = (HashtableEntry<oop, mtSymbol>*)HashtableEntry<oop, mtSymbol>::make_ptr(*p);
     }
   }
 }
 
 void StringTable::verify() {
   for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<oop>* p = the_table()->bucket(i);
+    HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
     for ( ; p != NULL; p = p->next()) {
       oop s = p->literal();
       guarantee(s != NULL, "interned string is NULL");
@@ -821,7 +821,7 @@
 void StringTable::dump(outputStream* st) {
   NumberSeq summary;
   for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<oop>* p = the_table()->bucket(i);
+    HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
     int count = 0;
     for ( ; p != NULL; p = p->next()) {
       count++;
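
In symbolTable.cpp the arena backing permanent symbols is now charged to mtSymbol through a placement-style operator new on Arena, matching the initialize_symbols hunk above. A small sketch of that idiom under the same assumption (make_symbol_arena is an illustrative helper, not part of the changeset):

    // Illustrative only: create an arena whose chunks are accounted under mtSymbol.
    static Arena* make_symbol_arena(int init_size) {
      return (init_size == 0) ? new (mtSymbol) Arena()           // default chunk size
                              : new (mtSymbol) Arena(init_size); // CDS-sized arena
    }
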
--- a/hotspot/src/share/vm/classfile/symbolTable.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -71,7 +71,7 @@
   operator Symbol*()                             { return _temp; }
 };
 
-class SymbolTable : public Hashtable<Symbol*> {
+class SymbolTable : public Hashtable<Symbol*, mtSymbol> {
   friend class VMStructs;
   friend class ClassFileParser;
 
@@ -113,10 +113,10 @@
   Symbol* lookup(int index, const char* name, int len, unsigned int hash);
 
   SymbolTable()
-    : Hashtable<Symbol*>(symbol_table_size, sizeof (HashtableEntry<Symbol*>)) {}
+    : Hashtable<Symbol*, mtSymbol>(symbol_table_size, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
 
-  SymbolTable(HashtableBucket* t, int number_of_entries)
-    : Hashtable<Symbol*>(symbol_table_size, sizeof (HashtableEntry<Symbol*>), t,
+  SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
+    : Hashtable<Symbol*, mtSymbol>(symbol_table_size, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
                 number_of_entries) {}
 
   // Arena for permanent symbols (null class loader) that are never unloaded
@@ -145,10 +145,10 @@
     initialize_symbols(symbol_alloc_arena_size);
   }
 
-  static void create_table(HashtableBucket* t, int length,
+  static void create_table(HashtableBucket<mtSymbol>* t, int length,
                            int number_of_entries) {
     assert(_the_table == NULL, "One symbol table allowed.");
-    assert(length == symbol_table_size * sizeof(HashtableBucket),
+    assert(length == symbol_table_size * sizeof(HashtableBucket<mtSymbol>),
            "bad shared symbol size.");
     _the_table = new SymbolTable(t, number_of_entries);
     // if CDS give symbol table a default arena size since most symbols
@@ -224,13 +224,13 @@
 
   // Sharing
   static void copy_buckets(char** top, char*end) {
-    the_table()->Hashtable<Symbol*>::copy_buckets(top, end);
+    the_table()->Hashtable<Symbol*, mtSymbol>::copy_buckets(top, end);
   }
   static void copy_table(char** top, char*end) {
-    the_table()->Hashtable<Symbol*>::copy_table(top, end);
+    the_table()->Hashtable<Symbol*, mtSymbol>::copy_table(top, end);
   }
   static void reverse(void* boundary = NULL) {
-    the_table()->Hashtable<Symbol*>::reverse(boundary);
+    the_table()->Hashtable<Symbol*, mtSymbol>::reverse(boundary);
   }
 
   // Rehash the symbol table if it gets out of balance
@@ -238,8 +238,7 @@
   static bool needs_rehashing()         { return _needs_rehashing; }
 };
 
-
-class StringTable : public Hashtable<oop> {
+class StringTable : public Hashtable<oop, mtSymbol> {
   friend class VMStructs;
 
 private:
@@ -256,11 +255,11 @@
 
   oop lookup(int index, jchar* chars, int length, unsigned int hashValue);
 
-  StringTable() : Hashtable<oop>((int)StringTableSize,
-                                 sizeof (HashtableEntry<oop>)) {}
+  StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
+                              sizeof (HashtableEntry<oop, mtSymbol>)) {}
 
-  StringTable(HashtableBucket* t, int number_of_entries)
-    : Hashtable<oop>((int)StringTableSize, sizeof (HashtableEntry<oop>), t,
+  StringTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
+    : Hashtable<oop, mtSymbol>((int)StringTableSize, sizeof (HashtableEntry<oop, mtSymbol>), t,
                      number_of_entries) {}
 
   static bool use_alternate_hashcode()  { return _seed != 0; }
@@ -276,10 +275,10 @@
     _the_table = new StringTable();
   }
 
-  static void create_table(HashtableBucket* t, int length,
+  static void create_table(HashtableBucket<mtSymbol>* t, int length,
                            int number_of_entries) {
     assert(_the_table == NULL, "One string table allowed.");
-    assert((size_t)length == StringTableSize * sizeof(HashtableBucket),
+    assert((size_t)length == StringTableSize * sizeof(HashtableBucket<mtSymbol>),
            "bad shared string size.");
     _the_table = new StringTable(t, number_of_entries);
   }
@@ -313,13 +312,13 @@
 
   // Sharing
   static void copy_buckets(char** top, char*end) {
-    the_table()->Hashtable<oop>::copy_buckets(top, end);
+    the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);
   }
   static void copy_table(char** top, char*end) {
-    the_table()->Hashtable<oop>::copy_table(top, end);
+    the_table()->Hashtable<oop, mtSymbol>::copy_table(top, end);
   }
   static void reverse() {
-    the_table()->Hashtable<oop>::reverse();
+    the_table()->Hashtable<oop, mtSymbol>::reverse();
   }
 
   // Rehash the symbol table if it gets out of balance
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -1168,9 +1168,9 @@
 }
 
 
-void SystemDictionary::set_shared_dictionary(HashtableBucket* t, int length,
+void SystemDictionary::set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
                                              int number_of_entries) {
-  assert(length == _nof_buckets * sizeof(HashtableBucket),
+  assert(length == _nof_buckets * sizeof(HashtableBucket<mtClass>),
          "bad shared dictionary size.");
   _shared_dictionary = new Dictionary(_nof_buckets, t, number_of_entries);
 }
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -32,6 +32,7 @@
 #include "runtime/java.hpp"
 #include "runtime/reflectionUtils.hpp"
 #include "utilities/hashtable.hpp"
+#include "utilities/hashtable.inline.hpp"
 
 // The system dictionary stores all loaded classes and maps:
 //
@@ -72,7 +73,7 @@
 class Dictionary;
 class PlaceholderTable;
 class LoaderConstraintTable;
-class HashtableBucket;
+template <MEMFLAGS F> class HashtableBucket;
 class ResolutionErrorTable;
 class SymbolPropertyTable;
 
@@ -363,7 +364,7 @@
   static void copy_buckets(char** top, char* end);
   static void copy_table(char** top, char* end);
   static void reverse();
-  static void set_shared_dictionary(HashtableBucket* t, int length,
+  static void set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
                                     int number_of_entries);
   // Printing
   static void print()                   PRODUCT_RETURN;
--- a/hotspot/src/share/vm/code/codeBlob.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -144,7 +144,7 @@
   // chunk of memory, it's your job to free it.
   if (p != NULL) {
     // We need to allocate a chunk big enough to hold the OopMapSet and all of its OopMaps
-    _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size());
+    _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size(), mtCode);
     p->copy_to((address)_oop_maps);
   } else {
     _oop_maps = NULL;
@@ -180,7 +180,7 @@
 
 void CodeBlob::flush() {
   if (_oop_maps) {
-    FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
+    FREE_C_HEAP_ARRAY(unsigned char, _oop_maps, mtCode);
     _oop_maps = NULL;
   }
   _comments.free();
--- a/hotspot/src/share/vm/code/codeCache.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -856,7 +856,7 @@
 
   int bucketSize = 512;
   int bucketLimit = maxCodeSize / bucketSize + 1;
-  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit);
+  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
   memset(buckets,0,sizeof(int) * bucketLimit);
 
   for (cb = first(); cb != NULL; cb = next(cb)) {
@@ -893,7 +893,7 @@
     }
   }
 
-  FREE_C_HEAP_ARRAY(int, buckets);
+  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
 }
 
 void CodeCache::print() {
--- a/hotspot/src/share/vm/code/codeCache.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -88,6 +88,9 @@
   // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
   // what you are doing)
   static CodeBlob* find_blob_unsafe(void* start) {
+    // NMT can walk the stack before code cache is created
+    if (_heap == NULL) return NULL;
+
     CodeBlob* result = (CodeBlob*)_heap->find_start(start);
     // this assert is too strong because the heap code will return the
     // heapblock containing start. That block can often be larger than
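
The codeCache.hpp guard above exists because NMT may walk thread stacks very early in VM startup, before CodeCache has created its heap; find_blob_unsafe() therefore returns NULL instead of dereferencing a NULL _heap. Callers that can be reached that early must tolerate the NULL result, roughly as in this sketch (is_in_code_cache is an illustrative helper, not a function added by the changeset):

    // Illustrative only: a caller that may run before the code cache exists.
    static bool is_in_code_cache(void* pc) {
      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
      return cb != NULL;   // NULL also covers the pre-initialization case
    }
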
--- a/hotspot/src/share/vm/code/nmethod.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -31,7 +31,7 @@
 // This class is used internally by nmethods, to cache
 // exception/pc/handler information.
 
-class ExceptionCache : public CHeapObj {
+class ExceptionCache : public CHeapObj<mtCode> {
   friend class VMStructs;
  private:
   enum { cache_size = 16 };
--- a/hotspot/src/share/vm/code/stubs.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/code/stubs.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -101,7 +101,7 @@
 // of the concrete stub (see also macro below). There's exactly
 // one stub interface instance required per stub queue.
 
-class StubInterface: public CHeapObj {
+class StubInterface: public CHeapObj<mtCode> {
  public:
   // Initialization/finalization
   virtual void    initialize(Stub* self, int size)         = 0; // called after creation (called twice if allocated via (request, commit))
@@ -152,7 +152,7 @@
 // A StubQueue maintains a queue of stubs.
 // Note: All sizes (spaces) are given in bytes.
 
-class StubQueue: public CHeapObj {
+class StubQueue: public CHeapObj<mtCode> {
   friend class VMStructs;
  private:
   StubInterface* _stub_interface;                // the interface prototype
--- a/hotspot/src/share/vm/compiler/abstractCompiler.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/compiler/abstractCompiler.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -29,7 +29,7 @@
 
 typedef void (*initializer)(void);
 
-class AbstractCompiler : public CHeapObj {
+class AbstractCompiler : public CHeapObj<mtCompiler> {
  private:
   bool _is_initialized; // Mark whether compiler object is initialized
 
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -951,7 +951,7 @@
   int compiler_count = c1_compiler_count + c2_compiler_count;
 
   _method_threads =
-    new (ResourceObj::C_HEAP) GrowableArray<CompilerThread*>(compiler_count, true);
+    new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true);
 
   char name_buffer[256];
   for (int i = 0; i < c2_compiler_count; i++) {
@@ -1627,7 +1627,7 @@
       }
       fp = fopen(fileBuf, "at");
       if (fp != NULL) {
-        file = NEW_C_HEAP_ARRAY(char, strlen(fileBuf)+1);
+        file = NEW_C_HEAP_ARRAY(char, strlen(fileBuf)+1, mtCompiler);
         strcpy(file, fileBuf);
         break;
       }
@@ -1637,7 +1637,7 @@
     } else {
       if (LogCompilation && Verbose)
         tty->print_cr("Opening compilation log %s", file);
-      CompileLog* log = new(ResourceObj::C_HEAP) CompileLog(file, fp, thread_id);
+      CompileLog* log = new(ResourceObj::C_HEAP, mtCompiler) CompileLog(file, fp, thread_id);
       thread->init_log(log);
 
       if (xtty != NULL) {
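
compileBroker.cpp shows the matching change for ResourceObj subclasses allocated on the C heap: the placement new now names the NMT category alongside ResourceObj::C_HEAP. A sketch of the pattern using the same types as the hunks above (the local variable name and the initial capacity of 8 are illustrative):

    // Illustrative only: a C-heap GrowableArray attributed to mtCompiler.
    GrowableArray<CompilerThread*>* threads =
        new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(8, true);
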
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -36,7 +36,7 @@
 //
 // An entry in the compile queue.  It represents a pending or current
 // compilation.
-class CompileTask : public CHeapObj {
+class CompileTask : public CHeapObj<mtCompiler> {
   friend class VMStructs;
 
  private:
@@ -131,7 +131,7 @@
 //
 // Per Compiler Performance Counters.
 //
-class CompilerCounters : public CHeapObj {
+class CompilerCounters : public CHeapObj<mtCompiler> {
 
   public:
     enum {
@@ -175,7 +175,7 @@
 // CompileQueue
 //
 // A list of CompileTasks.
-class CompileQueue : public CHeapObj {
+class CompileQueue : public CHeapObj<mtCompiler> {
  private:
   const char* _name;
   Monitor*    _lock;
--- a/hotspot/src/share/vm/compiler/compileLog.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/compiler/compileLog.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -37,14 +37,14 @@
 CompileLog::CompileLog(const char* file, FILE* fp, intx thread_id)
   : _context(_context_buffer, sizeof(_context_buffer))
 {
-  initialize(new(ResourceObj::C_HEAP) fileStream(fp));
+  initialize(new(ResourceObj::C_HEAP, mtCompiler) fileStream(fp));
   _file = file;
   _file_end = 0;
   _thread_id = thread_id;
 
   _identities_limit = 0;
   _identities_capacity = 400;
-  _identities = NEW_C_HEAP_ARRAY(char, _identities_capacity);
+  _identities = NEW_C_HEAP_ARRAY(char, _identities_capacity, mtCompiler);
 
   // link into the global list
   { MutexLocker locker(CompileTaskAlloc_lock);
@@ -56,7 +56,7 @@
 CompileLog::~CompileLog() {
   delete _out;
   _out = NULL;
-  FREE_C_HEAP_ARRAY(char, _identities);
+  FREE_C_HEAP_ARRAY(char, _identities, mtCompiler);
 }
 
 
@@ -109,7 +109,7 @@
   if (id >= _identities_capacity) {
     int new_cap = _identities_capacity * 2;
     if (new_cap <= id)  new_cap = id + 100;
-    _identities = REALLOC_C_HEAP_ARRAY(char, _identities, new_cap);
+    _identities = REALLOC_C_HEAP_ARRAY(char, _identities, new_cap, mtCompiler);
     _identities_capacity = new_cap;
   }
   while (id >= _identities_limit) {
--- a/hotspot/src/share/vm/compiler/compilerOracle.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/compiler/compilerOracle.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -34,7 +34,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.hpp"
 
-class MethodMatcher : public CHeapObj {
+class MethodMatcher : public CHeapObj<mtCompiler> {
  public:
   enum Mode {
     Exact,
--- a/hotspot/src/share/vm/compiler/oopMap.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/compiler/oopMap.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -599,7 +599,7 @@
 
 #ifdef COMPILER2
 
-class DerivedPointerEntry : public CHeapObj {
+class DerivedPointerEntry : public CHeapObj<mtCompiler> {
  private:
   oop*     _location; // Location of derived pointer (also pointing to the base)
   intptr_t _offset;   // Offset from base pointer
@@ -621,7 +621,7 @@
   assert (!_active, "should not be active");
   assert(_list == NULL || _list->length() == 0, "table not empty");
   if (_list == NULL) {
-    _list = new (ResourceObj::C_HEAP) GrowableArray<DerivedPointerEntry*>(10, true); // Allocated on C heap
+    _list = new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<DerivedPointerEntry*>(10, true); // Allocated on C heap
   }
   _active = true;
 }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -617,7 +617,7 @@
 
 // A parallel-GC-thread-local allocation buffer for allocation into a
 // CompactibleFreeListSpace.
-class CFLS_LAB : public CHeapObj {
+class CFLS_LAB : public CHeapObj<mtGC> {
   // The space that this buffer allocates into.
   CompactibleFreeListSpace* _cfls;
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -174,7 +174,7 @@
 
 // This struct contains per-thread things necessary to support parallel
 // young-gen collection.
-class CMSParGCThreadState: public CHeapObj {
+class CMSParGCThreadState: public CHeapObj<mtGC> {
  public:
   CFLS_LAB lab;
   PromotionInfo promo;
@@ -229,7 +229,7 @@
   if (CollectedHeap::use_parallel_gc_threads()) {
     typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
     _par_gc_thread_states =
-      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
+      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
     if (_par_gc_thread_states == NULL) {
       vm_exit_during_initialization("Could not allocate par gc structs");
     }
@@ -687,7 +687,7 @@
         warning("task_queues allocation failure.");
         return;
       }
-      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
+      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
       if (_hash_seed == NULL) {
         warning("_hash_seed array allocation failure");
         return;
@@ -737,7 +737,7 @@
     assert(_young_gen != NULL, "no _young_gen");
     _eden_chunk_index = 0;
     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
-    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
+    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
     if (_eden_chunk_array == NULL) {
       _eden_chunk_capacity = 0;
       warning("GC/CMS: _eden_chunk_array allocation failure");
@@ -750,35 +750,35 @@
     const size_t max_plab_samples =
       ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
 
-    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
-    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
-    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
+    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
+    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
+    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
     if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
         || _cursor == NULL) {
       warning("Failed to allocate survivor plab/chunk array");
       if (_survivor_plab_array  != NULL) {
-        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
+        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
         _survivor_plab_array = NULL;
       }
       if (_survivor_chunk_array != NULL) {
-        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
+        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
         _survivor_chunk_array = NULL;
       }
       if (_cursor != NULL) {
-        FREE_C_HEAP_ARRAY(size_t, _cursor);
+        FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
         _cursor = NULL;
       }
     } else {
       _survivor_chunk_capacity = 2*max_plab_samples;
       for (uint i = 0; i < ParallelGCThreads; i++) {
-        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
+        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
         if (vec == NULL) {
           warning("Failed to allocate survivor plab array");
           for (int j = i; j > 0; j--) {
-            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
+            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
           }
-          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
-          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
+          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
+          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
           _survivor_plab_array = NULL;
           _survivor_chunk_array = NULL;
           _survivor_chunk_capacity = 0;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -161,7 +161,7 @@
 
 // Represents a marking stack used by the CMS collector.
 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
-class CMSMarkStack: public CHeapObj  {
+class CMSMarkStack: public CHeapObj<mtGC>  {
   //
   friend class CMSCollector;   // to get at expansion stats further below
   //
@@ -265,7 +265,7 @@
 
 // Survivor Chunk Array in support of parallelization of
 // Survivor Space rescan.
-class ChunkArray: public CHeapObj {
+class ChunkArray: public CHeapObj<mtGC> {
   size_t _index;
   size_t _capacity;
   size_t _overflows;
@@ -506,7 +506,7 @@
 };
 
 
-class CMSCollector: public CHeapObj {
+class CMSCollector: public CHeapObj<mtGC> {
   friend class VMStructs;
   friend class ConcurrentMarkSweepThread;
   friend class ConcurrentMarkSweepGeneration;
@@ -553,8 +553,8 @@
   // The following array-pair keeps track of mark words
   // displaced for accommodating overflow list above.
   // This code will likely be revisited under RFE#4922830.
-  Stack<oop>     _preserved_oop_stack;
-  Stack<markOop> _preserved_mark_stack;
+  Stack<oop, mtGC>     _preserved_oop_stack;
+  Stack<markOop, mtGC> _preserved_mark_stack;
 
   int*             _hash_seed;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -28,7 +28,7 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "utilities/growableArray.hpp"
 
-class CollectionSetChooser: public CHeapObj {
+class CollectionSetChooser: public CHeapObj<mtGC> {
 
   GrowableArray<HeapRegion*> _regions;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -79,7 +79,7 @@
   _n_threads = _n_worker_threads + 1;
   reset_threshold_step();
 
-  _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
+  _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC);
   int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
   ConcurrentG1RefineThread *next = NULL;
   for (int i = _n_threads - 1; i >= 0; i--) {
@@ -157,7 +157,7 @@
     _def_use_cache = true;
     _use_cache = true;
     _hot_cache_size = (1 << G1ConcRSLogCacheSize);
-    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size);
+    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
     _n_hot = 0;
     _hot_cache_idx = 0;
 
@@ -191,18 +191,18 @@
     // Please see the comment in allocate_card_count_cache
     // for why we call os::malloc() and os::free() directly.
     assert(_card_counts != NULL, "Logic");
-    os::free(_card_counts);
+    os::free(_card_counts, mtGC);
     assert(_card_epochs != NULL, "Logic");
-    os::free(_card_epochs);
+    os::free(_card_epochs, mtGC);
 
     assert(_hot_cache != NULL, "Logic");
-    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
+    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
   }
   if (_threads != NULL) {
     for (int i = 0; i < _n_threads; i++) {
       delete _threads[i];
     }
-    FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
+    FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads, mtGC);
   }
 }
 
@@ -436,17 +436,17 @@
   size_t counts_size = n * sizeof(CardCountCacheEntry);
   size_t epochs_size = n * sizeof(CardEpochCacheEntry);
 
-  *counts = (CardCountCacheEntry*) os::malloc(counts_size);
+  *counts = (CardCountCacheEntry*) os::malloc(counts_size, mtGC);
   if (*counts == NULL) {
     // allocation was unsuccessful
     return false;
   }
 
-  *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size);
+  *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size, mtGC);
   if (*epochs == NULL) {
     // allocation was unsuccessful - free counts array
     assert(*counts != NULL, "must be");
-    os::free(*counts);
+    os::free(*counts, mtGC);
     *counts = NULL;
     return false;
   }
@@ -479,8 +479,8 @@
         // Allocation was successful.
         // We can just free the old arrays; we're
         // not interested in preserving the contents
-        if (_card_counts != NULL) os::free(_card_counts);
-        if (_card_epochs != NULL) os::free(_card_epochs);
+        if (_card_counts != NULL) os::free(_card_counts, mtGC);
+        if (_card_epochs != NULL) os::free(_card_epochs, mtGC);
 
         // Cache the size of the arrays and the index that got us there.
         _n_card_counts = cache_size;
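
concurrentG1Refine.cpp also shows that os::malloc() and os::free() now carry a memory category, so the raw allocations this file makes directly (see the allocate_card_count_cache comment above) are still attributed correctly. A hedged sketch of the paired calls (alloc_card_cache is an illustrative name):

    // Illustrative only: raw C-heap allocation and release attributed to mtGC.
    static void* alloc_card_cache(size_t bytes) {
      void* p = os::malloc(bytes, mtGC);
      return p;   // caller is expected to release it with os::free(p, mtGC)
    }
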
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -34,7 +34,7 @@
 class ConcurrentG1RefineThread;
 class G1RemSet;
 
-class ConcurrentG1Refine: public CHeapObj {
+class ConcurrentG1Refine: public CHeapObj<mtGC> {
   ConcurrentG1RefineThread** _threads;
   int _n_threads;
   int _n_worker_threads;
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -42,6 +42,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
+#include "services/memTracker.hpp"
 
 // Concurrent marking bit map wrapper
 
@@ -53,6 +54,8 @@
   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
 
+  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
+
   guarantee(brs.is_reserved(), "couldn't allocate concurrent marking bit map");
   // For now we'll just commit all of the bit map up front.
   // Later on we'll try to be more parsimonious with swap.
@@ -161,7 +164,7 @@
 {}
 
 void CMMarkStack::allocate(size_t size) {
-  _base = NEW_C_HEAP_ARRAY(oop, size);
+  _base = NEW_C_HEAP_ARRAY(oop, size, mtGC);
   if (_base == NULL) {
     vm_exit_during_initialization("Failed to allocate CM region mark stack");
   }
@@ -173,7 +176,7 @@
 
 CMMarkStack::~CMMarkStack() {
   if (_base != NULL) {
-    FREE_C_HEAP_ARRAY(oop, _base);
+    FREE_C_HEAP_ARRAY(oop, _base, mtGC);
   }
 }
 
@@ -480,11 +483,11 @@
 
   _root_regions.init(_g1h, this);
 
-  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
-  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);
-
-  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_task_num);
-  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_task_num);
+  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num, mtGC);
+  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num, mtGC);
+
+  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_task_num, mtGC);
+  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_task_num, mtGC);
 
   BitMap::idx_t card_bm_size = _card_bm.size();
 
@@ -496,7 +499,7 @@
     _task_queues->register_queue(i, task_queue);
 
     _count_card_bitmaps[i] = BitMap(card_bm_size, false);
-    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions);
+    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions, mtGC);
 
     _tasks[i] = new CMTask(i, this,
                            _count_marked_bytes[i],
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -30,8 +30,8 @@
 
 class G1CollectedHeap;
 class CMTask;
-typedef GenericTaskQueue<oop>            CMTaskQueue;
-typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;
+typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
+typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
 
 // Closure used by CM during concurrent reference discovery
 // and reference processing (during remarking) to determine
@@ -343,7 +343,7 @@
 
 class ConcurrentMarkThread;
 
-class ConcurrentMark : public CHeapObj {
+class ConcurrentMark: public CHeapObj<mtGC> {
   friend class ConcurrentMarkThread;
   friend class CMTask;
   friend class CMBitMapClosure;
--- a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -32,7 +32,7 @@
 
 // A closure class for processing card table entries.  Note that we don't
 // require these closure objects to be stack-allocated.
-class CardTableEntryClosure: public CHeapObj {
+class CardTableEntryClosure: public CHeapObj<mtGC> {
 public:
   // Process the card whose card table entry is "card_ptr".  If returns
   // "false", terminate the iteration early.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -27,6 +27,7 @@
 #include "memory/space.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
+#include "services/memTracker.hpp"
 
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetSharedArray
@@ -44,6 +45,9 @@
   if (!_vs.initialize(rs, 0)) {
     vm_exit_during_initialization("Could not reserve enough space for heap offset array");
   }
+
+  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+
   _offset_array = (u_char*)_vs.low_boundary();
   resize(init_word_size);
   if (TraceBlockOffsetTable) {
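
For memory that is reserved as a ReservedSpace rather than malloc'ed, the concurrentMark.cpp and g1BlockOffsetTable.cpp hunks above tag the region after reservation with MemTracker::record_virtual_memory_type(), so NMT reports it under mtGC instead of leaving it uncategorized. A sketch of the idiom under that assumption (reserve_gc_region is an illustrative helper, not part of the changeset):

    #include "services/memTracker.hpp"

    // Illustrative only: reserve a region and classify it as GC memory for NMT.
    static ReservedSpace reserve_gc_region(size_t bytes) {
      ReservedSpace rs(bytes);
      if (rs.is_reserved()) {
        MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
      }
      return rs;
    }
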
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -117,7 +117,7 @@
 
 // Here is the shared array type.
 
-class G1BlockOffsetSharedArray: public CHeapObj {
+class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
   friend class G1BlockOffsetArray;
   friend class G1BlockOffsetArrayContigSpace;
   friend class VMStructs;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -1916,14 +1916,14 @@
   assert(n_rem_sets > 0, "Invariant.");
 
   HeapRegionRemSetIterator** iter_arr =
-    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
+    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC);
   for (int i = 0; i < n_queues; i++) {
     iter_arr[i] = new HeapRegionRemSetIterator();
   }
   _rem_set_iterator = iter_arr;
 
-  _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues);
-  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues);
+  _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
+  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
 
   for (int i = 0; i < n_queues; i++) {
     RefToScanQueue* q = new RefToScanQueue();
@@ -2082,7 +2082,7 @@
 
    _in_cset_fast_test_length = max_regions();
    _in_cset_fast_test_base =
-                   NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length);
+                   NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
 
    // We're biasing _in_cset_fast_test to avoid subtracting the
    // beginning of the heap every time we want to index; basically
@@ -3505,7 +3505,7 @@
 G1CollectedHeap::setup_surviving_young_words() {
   assert(_surviving_young_words == NULL, "pre-condition");
   uint array_length = g1_policy()->young_cset_region_length();
-  _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length);
+  _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
   if (_surviving_young_words == NULL) {
     vm_exit_out_of_memory(sizeof(size_t) * array_length,
                           "Not enough space for young surv words summary.");
@@ -3530,7 +3530,7 @@
 void
 G1CollectedHeap::cleanup_surviving_young_words() {
   guarantee( _surviving_young_words != NULL, "pre-condition" );
-  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
+  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);
   _surviving_young_words = NULL;
 }
 
@@ -4073,7 +4073,7 @@
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
-  _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
+  _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
 }
 
 void G1CollectedHeap::finalize_for_evac_failure() {
@@ -4207,9 +4207,9 @@
     if (_objs_with_preserved_marks == NULL) {
       assert(_preserved_marks_of_objs == NULL, "Both or none.");
       _objs_with_preserved_marks =
-        new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
+        new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
       _preserved_marks_of_objs =
-        new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
+        new (ResourceObj::C_HEAP, mtGC) GrowableArray<markOop>(40, true);
     }
     _objs_with_preserved_marks->push(obj);
     _preserved_marks_of_objs->push(m);
@@ -4269,7 +4269,7 @@
   uint array_length = PADDING_ELEM_NUM +
                       real_length +
                       PADDING_ELEM_NUM;
-  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
+  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
   if (_surviving_young_words_base == NULL)
     vm_exit_out_of_memory(array_length * sizeof(size_t),
                           "Not enough space for young surv histo.");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -62,8 +62,8 @@
 class ConcurrentG1Refine;
 class GenerationCounters;
 
-typedef OverflowTaskQueue<StarTask>         RefToScanQueue;
-typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
+typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
+typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
 
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
@@ -74,7 +74,7 @@
   GCAllocPurposeCount
 };
 
-class YoungList : public CHeapObj {
+class YoungList : public CHeapObj<mtGC> {
 private:
   G1CollectedHeap* _g1h;
 
@@ -1772,7 +1772,7 @@
   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
 
   ~G1ParScanThreadState() {
-    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
+    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
   }
 
   RefToScanQueue*   refs()            { return _refs;             }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -40,7 +40,7 @@
 // TraceGen0Time collects data on _both_ young and mixed evacuation pauses
 // (the latter may contain non-young regions - i.e. regions that are
 // technically in Gen1) while TraceGen1Time collects data about full GCs.
-class TraceGen0TimeData : public CHeapObj {
+class TraceGen0TimeData : public CHeapObj<mtGC> {
  private:
   unsigned  _young_pause_num;
   unsigned  _mixed_pause_num;
@@ -86,7 +86,7 @@
   void print() const;
 };
 
-class TraceGen1TimeData : public CHeapObj {
+class TraceGen1TimeData : public CHeapObj<mtGC> {
  private:
   NumberSeq _all_full_gc_times;
 
@@ -131,7 +131,7 @@
 //
 // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
 // combined with either NewSize or MaxNewSize. (A warning message is printed.)
-class G1YoungGenSizer : public CHeapObj {
+class G1YoungGenSizer : public CHeapObj<mtGC> {
 private:
   enum SizerKind {
     SizerDefaults,
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -34,7 +34,7 @@
 /***** ALL TIMES ARE IN SECS!!!!!!! *****/
 
 // this is the "interface"
-class G1MMUTracker: public CHeapObj {
+class G1MMUTracker: public CHeapObj<mtGC> {
 protected:
   double          _time_slice;
   double          _max_gc_time; // this is per time slice
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -112,7 +112,7 @@
 // do which is important as we want to keep the eden region allocation
 // path as low-overhead as possible.
 
-class G1MonitoringSupport : public CHeapObj {
+class G1MonitoringSupport : public CHeapObj<mtGC> {
   friend class VMStructs;
 
   G1CollectedHeap* _g1h;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -75,7 +75,7 @@
 {
   _seq_task = new SubTasksDone(NumSeqTasks);
   guarantee(n_workers() > 0, "There should be some workers");
-  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers());
+  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers(), mtGC);
   for (uint i = 0; i < n_workers(); i++) {
     _cset_rs_update_cl[i] = NULL;
   }
@@ -86,7 +86,7 @@
   for (uint i = 0; i < n_workers(); i++) {
     assert(_cset_rs_update_cl[i] == NULL, "it should be");
   }
-  FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
+  FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl, mtGC);
 }
 
 void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
@@ -416,7 +416,7 @@
     // _seq_task->set_n_termination((int)n_workers());
   }
   guarantee( _cards_scanned == NULL, "invariant" );
-  _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
+  _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers(), mtGC);
   for (uint i = 0; i < n_workers(); ++i) {
     _cards_scanned[i] = 0;
   }
@@ -487,7 +487,7 @@
   for (uint i = 0; i < n_workers(); ++i) {
     _total_cards_scanned += _cards_scanned[i];
   }
-  FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
+  FREE_C_HEAP_ARRAY(size_t, _cards_scanned, mtGC);
   _cards_scanned = NULL;
   // Cleanup after copy
   _g1->set_refine_cte_cl_concurrency(true);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -36,7 +36,7 @@
 // external heap references into it.  Uses a mod ref bs to track updates,
 // so that they can be used to update the individual region remsets.
 
-class G1RemSet: public CHeapObj {
+class G1RemSet: public CHeapObj<mtGC> {
 protected:
   G1CollectedHeap* _g1;
   unsigned _conc_refine_cards;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -39,7 +39,7 @@
 
 // OtherRegionsTable
 
-class PerRegionTable: public CHeapObj {
+class PerRegionTable: public CHeapObj<mtGC> {
   friend class OtherRegionsTable;
   friend class HeapRegionRemSetIterator;
 
@@ -282,7 +282,7 @@
     // We must recount the "occupied."
     recount_occupied();
 #endif
-    FREE_C_HEAP_ARRAY(PerRegionTable*, _par_tables);
+    FREE_C_HEAP_ARRAY(PerRegionTable*, _par_tables, mtGC);
     _par_tables = NULL;
 #if COUNT_PAR_EXPANDS
     Atomic::inc(&n_par_contracts);
@@ -385,7 +385,7 @@
       if (res != NULL) return;
       // Otherwise, we reserved the right to do the expansion.
 
-      PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n);
+      PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n, mtGC);
       for (int i = 0; i < n; i++) {
         PerRegionTable* ptable = PerRegionTable::alloc(hr());
         ptables[i] = ptable;
@@ -546,9 +546,9 @@
   _from_card_cache_max_regions = max_regions;
 
   int n_par_rs = HeapRegionRemSet::num_par_rem_sets();
-  _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs);
+  _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs, mtGC);
   for (int i = 0; i < n_par_rs; i++) {
-    _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions);
+    _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions, mtGC);
     for (size_t j = 0; j < max_regions; j++) {
       _from_card_cache[i][j] = -1;  // An invalid value.
     }
@@ -1333,9 +1333,9 @@
            && _recorded_cards == NULL
            && _recorded_regions == NULL,
            "Inv");
-    _recorded_oops    = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded);
-    _recorded_cards   = NEW_C_HEAP_ARRAY(HeapWord*,          MaxRecorded);
-    _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*,        MaxRecorded);
+    _recorded_oops    = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded, mtGC);
+    _recorded_cards   = NEW_C_HEAP_ARRAY(HeapWord*,          MaxRecorded, mtGC);
+    _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*,        MaxRecorded, mtGC);
   }
   if (_n_recorded == MaxRecorded) {
     gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
@@ -1356,8 +1356,8 @@
     assert(_n_recorded_events == 0
            && _recorded_event_index == NULL,
            "Inv");
-    _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents);
-    _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents);
+    _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents, mtGC);
+    _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents, mtGC);
   }
   if (_n_recorded_events == MaxRecordedEvents) {
     gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -173,7 +173,7 @@
   static void print_from_card_cache();
 };
 
-class HeapRegionRemSet : public CHeapObj {
+class HeapRegionRemSet : public CHeapObj<mtGC> {
   friend class VMStructs;
   friend class HeapRegionRemSetIterator;
 
@@ -360,7 +360,7 @@
 #endif
 };
 
-class HeapRegionRemSetIterator : public CHeapObj {
+class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
 
   // The region over which we're iterating.
   const HeapRegionRemSet* _hrrs;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -86,7 +86,7 @@
   _allocated_length = 0;
   _max_length = max_length;
 
-  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
+  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC);
   memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
   _regions_biased = _regions - ((uintx) bottom >> _region_shift);
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -53,7 +53,7 @@
 //
 // and maintain that: _length <= _allocated_length <= _max_length
 
-class HeapRegionSeq: public CHeapObj {
+class HeapRegionSeq: public CHeapObj<mtGC> {
   friend class VMStructs;
 
   // The array that holds the HeapRegions.
--- a/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -126,7 +126,7 @@
     return res;
   } else {
     // Allocate space for the BufferNode in front of the buffer.
-    char *b =  NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size());
+    char *b =  NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size(), mtGC);
     return BufferNode::make_buffer_from_block(b);
   }
 }
@@ -149,7 +149,7 @@
     assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
     void* b = BufferNode::make_block_from_node(_buf_free_list);
     _buf_free_list = _buf_free_list->next();
-    FREE_C_HEAP_ARRAY(char, b);
+    FREE_C_HEAP_ARRAY(char, b, mtGC);
     _buf_free_list_sz --;
     n--;
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -208,7 +208,7 @@
   PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
   _shared_satb_queue.set_lock(lock);
   if (ParallelGCThreads > 0) {
-    _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads);
+    _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads, mtGC);
   }
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -148,8 +148,8 @@
 RSHashTable::RSHashTable(size_t capacity) :
   _capacity(capacity), _capacity_mask(capacity-1),
   _occupied_entries(0), _occupied_cards(0),
-  _entries((SparsePRTEntry*)NEW_C_HEAP_ARRAY(char, SparsePRTEntry::size() * capacity)),
-  _buckets(NEW_C_HEAP_ARRAY(int, capacity)),
+  _entries((SparsePRTEntry*)NEW_C_HEAP_ARRAY(char, SparsePRTEntry::size() * capacity, mtGC)),
+  _buckets(NEW_C_HEAP_ARRAY(int, capacity, mtGC)),
   _free_list(NullEntry), _free_region(0)
 {
   clear();
@@ -157,11 +157,11 @@
 
 RSHashTable::~RSHashTable() {
   if (_entries != NULL) {
-    FREE_C_HEAP_ARRAY(SparsePRTEntry, _entries);
+    FREE_C_HEAP_ARRAY(SparsePRTEntry, _entries, mtGC);
     _entries = NULL;
   }
   if (_buckets != NULL) {
-    FREE_C_HEAP_ARRAY(int, _buckets);
+    FREE_C_HEAP_ARRAY(int, _buckets, mtGC);
     _buckets = NULL;
   }
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -42,7 +42,7 @@
 // insertions only enqueue old versions for deletions, but do not delete
 // old versions synchronously.
 
-class SparsePRTEntry: public CHeapObj {
+class SparsePRTEntry: public CHeapObj<mtGC> {
 public:
   enum SomePublicConstants {
     NullEntry     = -1,
@@ -101,7 +101,7 @@
 };
 
 
-class RSHashTable : public CHeapObj {
+class RSHashTable : public CHeapObj<mtGC> {
 
   friend class RSHashTableIter;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -43,7 +43,7 @@
   reset();
   if (summary_surv_rates_len > 0) {
     size_t length = summary_surv_rates_len;
-    _summary_surv_rates = NEW_C_HEAP_ARRAY(NumberSeq*, length);
+    _summary_surv_rates = NEW_C_HEAP_ARRAY(NumberSeq*, length, mtGC);
     for (size_t i = 0; i < length; ++i) {
       _summary_surv_rates[i] = new NumberSeq();
     }
@@ -90,9 +90,9 @@
     double* old_accum_surv_rate_pred = _accum_surv_rate_pred;
     TruncatedSeq** old_surv_rate_pred = _surv_rate_pred;
 
-    _surv_rate = NEW_C_HEAP_ARRAY(double, _region_num);
-    _accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, _region_num);
-    _surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, _region_num);
+    _surv_rate = NEW_C_HEAP_ARRAY(double, _region_num, mtGC);
+    _accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, _region_num, mtGC);
+    _surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, _region_num, mtGC);
 
     for (size_t i = 0; i < _stats_arrays_length; ++i) {
       _surv_rate_pred[i] = old_surv_rate_pred[i];
@@ -104,13 +104,13 @@
     _stats_arrays_length = _region_num;
 
     if (old_surv_rate != NULL) {
-      FREE_C_HEAP_ARRAY(double, old_surv_rate);
+      FREE_C_HEAP_ARRAY(double, old_surv_rate, mtGC);
     }
     if (old_accum_surv_rate_pred != NULL) {
-      FREE_C_HEAP_ARRAY(double, old_accum_surv_rate_pred);
+      FREE_C_HEAP_ARRAY(double, old_accum_surv_rate_pred, mtGC);
     }
     if (old_surv_rate_pred != NULL) {
-      FREE_C_HEAP_ARRAY(TruncatedSeq*, old_surv_rate_pred);
+      FREE_C_HEAP_ARRAY(TruncatedSeq*, old_surv_rate_pred, mtGC);
     }
   }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/survRateGroup.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/survRateGroup.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -29,7 +29,7 @@
 
 class G1CollectorPolicy;
 
-class SurvRateGroup : public CHeapObj {
+class SurvRateGroup : public CHeapObj<mtGC> {
 private:
   G1CollectorPolicy* _g1p;
   const char* _name;
--- a/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -457,12 +457,12 @@
         if (_lowest_non_clean[i] != NULL) {
           assert(n_chunks != _lowest_non_clean_chunk_size[i],
                  "logical consequence");
-          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
+          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i], mtGC);
           _lowest_non_clean[i] = NULL;
         }
         // Now allocate a new one if necessary.
         if (_lowest_non_clean[i] == NULL) {
-          _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks);
+          _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
           _lowest_non_clean_chunk_size[i]       = n_chunks;
           _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
           for (int j = 0; j < (int)n_chunks; j++)
--- a/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -35,7 +35,7 @@
 class PLABStats;
 
 // A per-thread allocation buffer used during GC.
-class ParGCAllocBuffer: public CHeapObj {
+class ParGCAllocBuffer: public CHeapObj<mtGC> {
 protected:
   char head[32];
   size_t _word_sz;          // in HeapWord units
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -59,7 +59,7 @@
                                        Generation* old_gen_,
                                        int thread_num_,
                                        ObjToScanQueueSet* work_queue_set_,
-                                       Stack<oop>* overflow_stacks_,
+                                       Stack<oop, mtGC>* overflow_stacks_,
                                        size_t desired_plab_sz_,
                                        ParallelTaskTerminator& term_) :
   _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
@@ -184,7 +184,7 @@
   assert(ParGCUseLocalOverflow, "Else should not call");
   assert(young_gen()->overflow_list() == NULL, "Error");
   ObjToScanQueue* queue = work_queue();
-  Stack<oop>* const of_stack = overflow_stack();
+  Stack<oop, mtGC>* const of_stack = overflow_stack();
   const size_t num_overflow_elems = of_stack->size();
   const size_t space_available = queue->max_elems() - queue->size();
   const size_t num_take_elems = MIN3(space_available / 4,
@@ -297,7 +297,7 @@
                         ParNewGeneration&       gen,
                         Generation&             old_gen,
                         ObjToScanQueueSet&      queue_set,
-                        Stack<oop>*             overflow_stacks_,
+                        Stack<oop, mtGC>*       overflow_stacks_,
                         size_t                  desired_plab_sz,
                         ParallelTaskTerminator& term);
 
@@ -331,7 +331,7 @@
 ParScanThreadStateSet::ParScanThreadStateSet(
   int num_threads, Space& to_space, ParNewGeneration& gen,
   Generation& old_gen, ObjToScanQueueSet& queue_set,
-  Stack<oop>* overflow_stacks,
+  Stack<oop, mtGC>* overflow_stacks,
   size_t desired_plab_sz, ParallelTaskTerminator& term)
   : ResourceArray(sizeof(ParScanThreadState), num_threads),
     _gen(gen), _next_gen(old_gen), _term(term)
@@ -649,9 +649,14 @@
 
   _overflow_stacks = NULL;
   if (ParGCUseLocalOverflow) {
-    _overflow_stacks = NEW_C_HEAP_ARRAY(Stack<oop>, ParallelGCThreads);
+
+    // Typedef to work around the NEW_C_HEAP_ARRAY macro, which cannot
+    // handle a ',' in its type argument.
+    typedef Stack<oop, mtGC> GCOopStack;
+
+    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
     for (size_t i = 0; i < ParallelGCThreads; ++i) {
-      new (_overflow_stacks + i) Stack<oop>();
+      new (_overflow_stacks + i) Stack<oop, mtGC>();
     }
   }
 
@@ -1401,7 +1406,7 @@
     assert(_num_par_pushes > 0, "Tautology");
 #endif
     if (from_space_obj->forwardee() == from_space_obj) {
-      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
+      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
       listhead->forward_to(from_space_obj);
       from_space_obj = listhead;
     }
@@ -1553,7 +1558,7 @@
       // This can become a scaling bottleneck when there is work queue overflow coincident
       // with promotion failure.
       oopDesc* f = cur;
-      FREE_C_HEAP_ARRAY(oopDesc, f);
+      FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
     } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
       obj_to_push = cur;
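
The GCOopStack typedef introduced above is needed because NEW_C_HEAP_ARRAY is a preprocessor macro: a template argument list such as Stack<oop, mtGC> is split at the comma, so the macro receives too many arguments. A sketch of the failure and the workaround, assuming the stack and allocation interfaces from this changeset (the helper function is illustrative only):

    #include "memory/allocation.hpp"
    #include "oops/oopsHierarchy.hpp"      // oop
    #include "utilities/stack.hpp"

    // Illustrative helper mirroring the parNewGeneration.cpp hunk above.
    static void allocate_overflow_stacks(size_t n) {
      // Does not compile: the preprocessor splits "Stack<oop, mtGC>" at the
      // comma and NEW_C_HEAP_ARRAY ends up with four arguments.
      //   Stack<oop, mtGC>* s = NEW_C_HEAP_ARRAY(Stack<oop, mtGC>, n, mtGC);

      typedef Stack<oop, mtGC> GCOopStack;    // hide the comma behind a typedef
      GCOopStack* stacks = NEW_C_HEAP_ARRAY(GCOopStack, n, mtGC);
      for (size_t i = 0; i < n; ++i) {
        new (stacks + i) Stack<oop, mtGC>();  // placement-construct each element
      }
    }
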
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -41,7 +41,7 @@
 // in genOopClosures.inline.hpp.
 
 typedef Padded<OopTaskQueue> ObjToScanQueue;
-typedef GenericTaskQueueSet<ObjToScanQueue> ObjToScanQueueSet;
+typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
 
 class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
  private:
@@ -59,7 +59,7 @@
   friend class ParScanThreadStateSet;
  private:
   ObjToScanQueue *_work_queue;
-  Stack<oop>* const _overflow_stack;
+  Stack<oop, mtGC>* const _overflow_stack;
 
   ParGCAllocBuffer _to_space_alloc_buffer;
 
@@ -127,7 +127,7 @@
   ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
                      Generation* old_gen_, int thread_num_,
                      ObjToScanQueueSet* work_queue_set_,
-                     Stack<oop>* overflow_stacks_,
+                     Stack<oop, mtGC>* overflow_stacks_,
                      size_t desired_plab_sz_,
                      ParallelTaskTerminator& term_);
 
@@ -151,7 +151,7 @@
   void trim_queues(int max_size);
 
   // Private overflow stack usage
-  Stack<oop>* overflow_stack() { return _overflow_stack; }
+  Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
   bool take_from_overflow_stack();
   void push_on_overflow_stack(oop p);
 
@@ -312,7 +312,7 @@
   ObjToScanQueueSet* _task_queues;
 
   // Per-worker-thread local overflow stacks
-  Stack<oop>* _overflow_stacks;
+  Stack<oop, mtGC>* _overflow_stacks;
 
   // Desired size of survivor space plab's
   PLABStats _plab_stats;
--- a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -32,7 +32,7 @@
 class ParScanThreadState;
 class ParNewGeneration;
 typedef Padded<OopTaskQueue> ObjToScanQueue;
-typedef GenericTaskQueueSet<ObjToScanQueue> ObjToScanQueueSet;
+typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
 class ParallelTaskTerminator;
 
 class ParScanClosure: public OopsInGenClosure {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -40,7 +40,7 @@
 // must be shrunk.  Adjusting the boundary between the generations
 // is called for in this class.
 
-class AdjoiningGenerations : public CHeapObj {
+class AdjoiningGenerations : public CHeapObj<mtGC> {
   friend class VMStructs;
  private:
   // The young generation and old generation, respectively
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -116,7 +116,7 @@
 }
 
 GCTaskQueue* GCTaskQueue::create_on_c_heap() {
-  GCTaskQueue* result = new(ResourceObj::C_HEAP) GCTaskQueue(true);
+  GCTaskQueue* result = new(ResourceObj::C_HEAP, mtGC) GCTaskQueue(true);
   if (TraceGCTaskQueue) {
     tty->print_cr("GCTaskQueue::create_on_c_heap()"
                   " returns " INTPTR_FORMAT,
@@ -403,19 +403,19 @@
   _queue = SynchronizedGCTaskQueue::create(unsynchronized_queue, lock());
   _noop_task = NoopGCTask::create_on_c_heap();
   _idle_inactive_task = WaitForBarrierGCTask::create_on_c_heap();
-  _resource_flag = NEW_C_HEAP_ARRAY(bool, workers());
+  _resource_flag = NEW_C_HEAP_ARRAY(bool, workers(), mtGC);
   {
     // Set up worker threads.
     //     Distribute the workers among the available processors,
     //     unless we were told not to, or if the os doesn't want to.
-    uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers());
+    uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);
     if (!BindGCTaskThreadsToCPUs ||
         !os::distribute_processes(workers(), processor_assignment)) {
       for (uint a = 0; a < workers(); a += 1) {
         processor_assignment[a] = sentinel_worker();
       }
     }
-    _thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers());
+    _thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC);
     for (uint t = 0; t < workers(); t += 1) {
       set_thread(t, GCTaskThread::create(this, t, processor_assignment[t]));
     }
@@ -426,7 +426,7 @@
       }
       tty->cr();
     }
-    FREE_C_HEAP_ARRAY(uint, processor_assignment);
+    FREE_C_HEAP_ARRAY(uint, processor_assignment, mtGC);
   }
   reset_busy_workers();
   set_unblocked();
@@ -455,11 +455,11 @@
       GCTaskThread::destroy(thread(i));
       set_thread(i, NULL);
     }
-    FREE_C_HEAP_ARRAY(GCTaskThread*, _thread);
+    FREE_C_HEAP_ARRAY(GCTaskThread*, _thread, mtGC);
     _thread = NULL;
   }
   if (_resource_flag != NULL) {
-    FREE_C_HEAP_ARRAY(bool, _resource_flag);
+    FREE_C_HEAP_ARRAY(bool, _resource_flag, mtGC);
     _resource_flag = NULL;
   }
   if (queue() != NULL) {
@@ -817,7 +817,7 @@
 }
 
 NoopGCTask* NoopGCTask::create_on_c_heap() {
-  NoopGCTask* result = new(ResourceObj::C_HEAP) NoopGCTask(true);
+  NoopGCTask* result = new(ResourceObj::C_HEAP, mtGC) NoopGCTask(true);
   return result;
 }
 
@@ -848,7 +848,7 @@
 }
 
 IdleGCTask* IdleGCTask::create_on_c_heap() {
-  IdleGCTask* result = new(ResourceObj::C_HEAP) IdleGCTask(true);
+  IdleGCTask* result = new(ResourceObj::C_HEAP, mtGC) IdleGCTask(true);
   assert(UseDynamicNumberOfGCThreads,
     "Should only be used with dynamic GC thread");
   return result;
@@ -984,7 +984,7 @@
 
 WaitForBarrierGCTask* WaitForBarrierGCTask::create_on_c_heap() {
   WaitForBarrierGCTask* result =
-    new (ResourceObj::C_HEAP) WaitForBarrierGCTask(true);
+    new (ResourceObj::C_HEAP, mtGC) WaitForBarrierGCTask(true);
   return result;
 }
 
@@ -1114,7 +1114,7 @@
     // Lazy initialization.
     if (freelist() == NULL) {
       _freelist =
-        new(ResourceObj::C_HEAP) GrowableArray<Monitor*>(ParallelGCThreads,
+        new(ResourceObj::C_HEAP, mtGC) GrowableArray<Monitor*>(ParallelGCThreads,
                                                          true);
     }
     if (! freelist()->is_empty()) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -216,7 +216,7 @@
 
 // A GCTaskQueue that can be synchronized.
 // This "has-a" GCTaskQueue and a mutex to do the exclusion.
-class SynchronizedGCTaskQueue : public CHeapObj {
+class SynchronizedGCTaskQueue : public CHeapObj<mtGC> {
 private:
   // Instance state.
   GCTaskQueue* _unsynchronized_queue;   // Has-a unsynchronized queue.
@@ -278,7 +278,7 @@
 
 // This is an abstract base class for getting notifications
 // when a GCTaskManager is done.
-class NotifyDoneClosure : public CHeapObj {
+class NotifyDoneClosure : public CHeapObj<mtGC> {
 public:
   // The notification callback method.
   virtual void notify(GCTaskManager* manager) = 0;
@@ -355,7 +355,7 @@
 // held in the GCTaskThread** _thread array in GCTaskManager.
 
 
-class GCTaskManager : public CHeapObj {
+class GCTaskManager : public CHeapObj<mtGC> {
  friend class ParCompactionManager;
  friend class PSParallelCompact;
  friend class PSScavenge;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -46,7 +46,7 @@
     vm_exit_out_of_memory(0, "Cannot create GC thread. Out of system resources.");
 
   if (PrintGCTaskTimeStamps) {
-    _time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries );
+    _time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
 
     guarantee(_time_stamps != NULL, "Sanity");
   }
@@ -56,7 +56,7 @@
 
 GCTaskThread::~GCTaskThread() {
   if (_time_stamps != NULL) {
-    FREE_C_HEAP_ARRAY(GCTaskTimeStamp, _time_stamps);
+    FREE_C_HEAP_ARRAY(GCTaskTimeStamp, _time_stamps, mtGC);
   }
 }
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -90,7 +90,7 @@
   void set_is_working(bool v) { _is_working = v; }
 };
 
-class GCTaskTimeStamp : public CHeapObj
+class GCTaskTimeStamp : public CHeapObj<mtGC>
 {
  private:
   jlong  _entry_time;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -28,6 +28,7 @@
 #include "memory/cardTableModRefBS.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
+#include "services/memTracker.hpp"
 
 void ObjectStartArray::initialize(MemRegion reserved_region) {
   // We're based on the assumption that we use the same
@@ -50,6 +51,7 @@
   if (!backing_store.is_reserved()) {
     vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
   }
+  MemTracker::record_virtual_memory_type((address)backing_store.base(), mtGC);
 
   // We do not commit any memory initially
   if (!_virtual_space.initialize(backing_store, 0)) {
@@ -57,10 +59,13 @@
   }
 
   _raw_base = (jbyte*)_virtual_space.low_boundary();
+
   if (_raw_base == NULL) {
     vm_exit_during_initialization("Could not get raw_base address");
   }
 
+  MemTracker::record_virtual_memory_type((address)_raw_base, mtGC);
+
   _offset_base = _raw_base - (size_t(reserved_region.start()) >> block_shift);
 
   _covered_region.set_start(reserved_region.start());
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -35,7 +35,7 @@
 // covered region.
 //
 
-class ObjectStartArray : public CHeapObj {
+class ObjectStartArray : public CHeapObj<mtGC> {
  friend class VerifyObjectStartArrayClosure;
 
  private:
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -29,6 +29,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/os.hpp"
 #include "utilities/bitMap.inline.hpp"
+#include "services/memTracker.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
 #endif
@@ -61,6 +62,9 @@
   ReservedSpace rs(bytes, rs_align, rs_align > 0);
   os::trace_page_sizes("par bitmap", raw_bytes, raw_bytes, page_sz,
                        rs.base(), rs.size());
+
+  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+
   _virtual_space = new PSVirtualSpace(rs, page_sz);
   if (_virtual_space != NULL && _virtual_space->expand_by(bytes)) {
     _region_start = covered_region.start();
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -32,7 +32,7 @@
 class oopDesc;
 class ParMarkBitMapClosure;
 
-class ParMarkBitMap: public CHeapObj
+class ParMarkBitMap: public CHeapObj<mtGC>
 {
 public:
   typedef BitMap::idx_t idx_t;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -40,6 +40,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/vmThread.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/vmError.hpp"
 
 PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
@@ -161,6 +162,8 @@
     }
   }
 
+  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);
+
   os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                        heap_rs.base(), pg_max_size);
   os::trace_page_sizes("ps main", og_min_size + yg_min_size,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -81,14 +81,14 @@
   uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();
 
   assert(_manager_array == NULL, "Attempt to initialize twice");
-  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
+  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);
   guarantee(_manager_array != NULL, "Could not allocate manager_array");
 
   _region_list = NEW_C_HEAP_ARRAY(RegionTaskQueue*,
-                                         parallel_gc_threads+1);
+                                  parallel_gc_threads+1, mtGC);
   guarantee(_region_list != NULL, "Could not initialize promotion manager");
 
-  _recycled_stack_index = NEW_C_HEAP_ARRAY(uint, parallel_gc_threads);
+  _recycled_stack_index = NEW_C_HEAP_ARRAY(uint, parallel_gc_threads, mtGC);
 
   // parallel_gc-threads + 1 to be consistent with the number of
   // compaction managers.
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -41,7 +41,7 @@
 class ParallelCompactData;
 class ParMarkBitMap;
 
-class ParCompactionManager : public CHeapObj {
+class ParCompactionManager : public CHeapObj<mtGC> {
   friend class ParallelTaskTerminator;
   friend class ParMarkBitMap;
   friend class PSParallelCompact;
@@ -66,8 +66,8 @@
  private:
   // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
   #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
-  typedef OverflowTaskQueue<ObjArrayTask, QUEUE_SIZE> ObjArrayTaskQueue;
-  typedef GenericTaskQueueSet<ObjArrayTaskQueue>      ObjArrayTaskQueueSet;
+  typedef OverflowTaskQueue<ObjArrayTask, mtGC, QUEUE_SIZE> ObjArrayTaskQueue;
+  typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC>      ObjArrayTaskQueueSet;
   #undef QUEUE_SIZE
 
   static ParCompactionManager** _manager_array;
@@ -78,7 +78,7 @@
   static PSOldGen*              _old_gen;
 
 private:
-  OverflowTaskQueue<oop>        _marking_stack;
+  OverflowTaskQueue<oop, mtGC>        _marking_stack;
   ObjArrayTaskQueue             _objarray_stack;
 
   // Is there a way to reuse the _marking_stack for the
@@ -110,8 +110,8 @@
   // popped.  If -1, there has not been any entry popped.
   static int                      _recycled_bottom;
 
-  Stack<Klass*>                 _revisit_klass_stack;
-  Stack<DataLayout*>            _revisit_mdo_stack;
+  Stack<Klass*, mtGC>                 _revisit_klass_stack;
+  Stack<DataLayout*, mtGC>            _revisit_mdo_stack;
 
   static ParMarkBitMap* _mark_bitmap;
 
@@ -126,7 +126,7 @@
  protected:
   // Array of tasks.  Needed by the ParallelTaskTerminator.
   static RegionTaskQueueSet* region_array()      { return _region_array; }
-  OverflowTaskQueue<oop>*  marking_stack()       { return &_marking_stack; }
+  OverflowTaskQueue<oop, mtGC>*  marking_stack()       { return &_marking_stack; }
 
   // Pushes onto the marking stack.  If the marking stack is full,
   // pushes onto the overflow stack.
@@ -175,8 +175,8 @@
   bool should_update();
   bool should_copy();
 
-  Stack<Klass*>* revisit_klass_stack() { return &_revisit_klass_stack; }
-  Stack<DataLayout*>* revisit_mdo_stack() { return &_revisit_mdo_stack; }
+  Stack<Klass*, mtGC>* revisit_klass_stack() { return &_revisit_klass_stack; }
+  Stack<DataLayout*, mtGC>* revisit_mdo_stack() { return &_revisit_mdo_stack; }
 
   // Save for later processing.  Must not fail.
   inline void push(oop obj) { _marking_stack.push(obj); }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -40,7 +40,7 @@
 
     const char* cns = PerfDataManager::name_space("generation", ordinal);
 
-    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1);
+    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
     strcpy(_name_space, cns);
 
     const char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -34,7 +34,7 @@
 
 class ObjectStartArray;
 
-class PSMarkSweepDecorator: public CHeapObj {
+class PSMarkSweepDecorator: public CHeapObj<mtGC> {
  private:
   static PSMarkSweepDecorator* _destination_decorator;
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -34,7 +34,7 @@
 
 class PSMarkSweepDecorator;
 
-class PSOldGen : public CHeapObj {
+class PSOldGen : public CHeapObj<mtGC> {
   friend class VMStructs;
   friend class PSPromotionManager; // Uses the cas_allocate methods
   friend class ParallelScavengeHeap;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -53,6 +53,7 @@
 #include "runtime/vmThread.hpp"
 #include "services/management.hpp"
 #include "services/memoryService.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/events.hpp"
 #include "utilities/stack.inline.hpp"
 
@@ -405,6 +406,9 @@
   ReservedSpace rs(bytes, rs_align, rs_align > 0);
   os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
                        rs.size());
+
+  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+
   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
   if (vspace != 0) {
     if (vspace->expand_by(bytes)) {
@@ -2732,7 +2736,7 @@
   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     ParCompactionManager* cm = ParCompactionManager::manager_array(i);
     KeepAliveClosure keep_alive_closure(cm);
-    Stack<Klass*>* const rks = cm->revisit_klass_stack();
+    Stack<Klass*, mtGC>* const rks = cm->revisit_klass_stack();
     if (PrintRevisitStats) {
       gclog_or_tty->print_cr("Revisit klass stack[%u] length = " SIZE_FORMAT,
                              i, rks->size());
@@ -2765,7 +2769,7 @@
   }
   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     ParCompactionManager* cm = ParCompactionManager::manager_array(i);
-    Stack<DataLayout*>* rms = cm->revisit_mdo_stack();
+    Stack<DataLayout*, mtGC>* rms = cm->revisit_mdo_stack();
     if (PrintRevisitStats) {
       gclog_or_tty->print_cr("Revisit MDO stack[%u] size = " SIZE_FORMAT,
                              i, rms->size());
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -36,7 +36,7 @@
 
 class ObjectStartArray;
 
-class PSPromotionLAB : public CHeapObj {
+class PSPromotionLAB : public CHeapObj<mtGC> {
  protected:
   static size_t filler_header_size;
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -45,7 +45,7 @@
   _young_space = heap->young_gen()->to_space();
 
   assert(_manager_array == NULL, "Attempt to initialize twice");
-  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1 );
+  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1, mtGC);
   guarantee(_manager_array != NULL, "Could not initialize promotion manager");
 
   _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -49,7 +49,7 @@
 class PSOldGen;
 class ParCompactionManager;
 
-class PSPromotionManager : public CHeapObj {
+class PSPromotionManager : public CHeapObj<mtGC> {
   friend class PSScavenge;
   friend class PSRefProcTaskExecutor;
  private:
@@ -77,7 +77,7 @@
   bool                                _old_gen_is_full;
 
   OopStarTaskQueue                    _claimed_stack_depth;
-  OverflowTaskQueue<oop>              _claimed_stack_breadth;
+  OverflowTaskQueue<oop, mtGC>        _claimed_stack_breadth;
 
   bool                                _totally_drain;
   uint                                _target_stack_size;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -62,8 +62,8 @@
 int                        PSScavenge::_tenuring_threshold = 0;
 HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
 elapsedTimer               PSScavenge::_accumulated_time;
-Stack<markOop>             PSScavenge::_preserved_mark_stack;
-Stack<oop>                 PSScavenge::_preserved_oop_stack;
+Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
+Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
 CollectorCounters*         PSScavenge::_counters = NULL;
 bool                       PSScavenge::_promotion_failed = false;
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -71,8 +71,8 @@
   static HeapWord*           _young_generation_boundary; // The lowest address possible for the young_gen.
                                                          // This is used to decide if an oop should be scavenged,
                                                          // cards should be marked, etc.
-  static Stack<markOop>          _preserved_mark_stack; // List of marks to be restored after failed promotion
-  static Stack<oop>              _preserved_oop_stack;  // List of oops that need their mark restored.
+  static Stack<markOop, mtGC> _preserved_mark_stack; // List of marks to be restored after failed promotion
+  static Stack<oop, mtGC>     _preserved_oop_stack;  // List of oops that need their mark restored.
   static CollectorCounters*      _counters;         // collector performance counters
   static bool                    _promotion_failed;
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -32,7 +32,7 @@
 // VirtualSpace is data structure for committing a previously reserved address
 // range in smaller chunks.
 
-class PSVirtualSpace : public CHeapObj {
+class PSVirtualSpace : public CHeapObj<mtGC> {
   friend class VMStructs;
  protected:
   // The space is committed/uncommited in chunks of size _alignment.  The
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -33,7 +33,7 @@
 
 class PSMarkSweepDecorator;
 
-class PSYoungGen : public CHeapObj {
+class PSYoungGen : public CHeapObj<mtGC> {
   friend class VMStructs;
   friend class ParallelScavengeHeap;
   friend class AdjoiningGenerations;
--- a/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -38,7 +38,7 @@
 class elapsedTimer;
 class CollectorPolicy;
 
-class AdaptiveSizePolicy : public CHeapObj {
+class AdaptiveSizePolicy : public CHeapObj<mtGC> {
  friend class GCAdaptivePolicyCounters;
  friend class PSGCAdaptivePolicyCounters;
  friend class CMSGCAdaptivePolicyCounters;
--- a/hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -37,7 +37,7 @@
     const char* cns = PerfDataManager::name_space(gc->name_space(), "space",
                                                   ordinal);
 
-    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1);
+    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
     strcpy(_name_space, cns);
 
     const char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/cSpaceCounters.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -32,7 +32,7 @@
 // A CSpaceCounters is a holder class for performance counters
 // that track a space;
 
-class CSpaceCounters: public CHeapObj {
+class CSpaceCounters: public CHeapObj<mtGC> {
   friend class VMStructs;
 
  private:
@@ -52,7 +52,7 @@
                  ContiguousSpace* s, GenerationCounters* gc);
 
   ~CSpaceCounters() {
-      if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+      if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
   }
 
   inline void update_capacity() {
--- a/hotspot/src/share/vm/gc_implementation/shared/collectorCounters.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/collectorCounters.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -34,7 +34,7 @@
 
     const char* cns = PerfDataManager::name_space("collector", ordinal);
 
-    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1);
+    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
     strcpy(_name_space, cns);
 
     char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/hotspot/src/share/vm/gc_implementation/shared/collectorCounters.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/collectorCounters.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -30,7 +30,7 @@
 // CollectorCounters is a holder class for performance counters
 // that track a collector
 
-class CollectorCounters: public CHeapObj {
+class CollectorCounters: public CHeapObj<mtGC> {
   friend class VMStructs;
 
   private:
@@ -50,7 +50,7 @@
     CollectorCounters(const char* name, int ordinal);
 
     ~CollectorCounters() {
-      if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+      if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
     }
 
     inline PerfCounter* invocation_counter() const  { return _invocations; }
--- a/hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -41,7 +41,7 @@
     const char* cns = PerfDataManager::name_space(gc->name_space(), "space",
                                                   ordinal);
 
-    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1);
+    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
     strcpy(_name_space, cns);
 
     const char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -34,7 +34,7 @@
 // A GSpaceCounter is a holder class for performance counters
 // that track a space;
 
-class GSpaceCounters: public CHeapObj {
+class GSpaceCounters: public CHeapObj<mtGC> {
   friend class VMStructs;
 
  private:
@@ -54,7 +54,7 @@
                  GenerationCounters* gc, bool sampled=true);
 
   ~GSpaceCounters() {
-    if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+    if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
   }
 
   inline void update_capacity() {
--- a/hotspot/src/share/vm/gc_implementation/shared/gcPolicyCounters.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcPolicyCounters.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -30,7 +30,7 @@
 // GCPolicyCounters is a holder class for performance counters
 // that track a generation
 
-class GCPolicyCounters: public CHeapObj {
+class GCPolicyCounters: public CHeapObj<mtGC> {
   friend class VMStructs;
 
   private:
--- a/hotspot/src/share/vm/gc_implementation/shared/gcStats.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcStats.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -27,7 +27,7 @@
 
 #include "gc_implementation/shared/gcUtil.hpp"
 
-class GCStats : public CHeapObj {
+class GCStats : public CHeapObj<mtGC> {
  protected:
   // Avg amount promoted; used for avoiding promotion undo
   // This class does not update deviations if the sample is zero.
--- a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -43,7 +43,7 @@
 //
 // This serves as our best estimate of a future unknown.
 //
-class AdaptiveWeightedAverage : public CHeapObj {
+class AdaptiveWeightedAverage : public CHeapObj<mtGC> {
  private:
   float            _average;        // The last computed average
   unsigned         _sample_count;   // How often we've sampled this average
@@ -146,7 +146,7 @@
   // Placement support
   void* operator new(size_t ignored, void* p) { return p; }
   // Allocator
-  void* operator new(size_t size) { return CHeapObj::operator new(size); }
+  void* operator new(size_t size) { return CHeapObj<mtGC>::operator new(size); }
 
   // Accessor
   float padded_average() const         { return _padded_avg; }
@@ -192,7 +192,7 @@
 // equation.
 //              y = intercept + slope * x
 
-class LinearLeastSquareFit : public CHeapObj {
+class LinearLeastSquareFit : public CHeapObj<mtGC> {
   double _sum_x;        // sum of all independent data points x
   double _sum_x_squared; // sum of all independent data points x**2
   double _sum_y;        // sum of all dependent data points y
--- a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -35,7 +35,7 @@
 
     const char* cns = PerfDataManager::name_space("generation", ordinal);
 
-    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1);
+    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
     strcpy(_name_space, cns);
 
     const char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -31,7 +31,7 @@
 // A GenerationCounter is a holder class for performance counters
 // that track a generation
 
-class GenerationCounters: public CHeapObj {
+class GenerationCounters: public CHeapObj<mtGC> {
   friend class VMStructs;
 
 private:
@@ -69,7 +69,7 @@
                      VirtualSpace* v);
 
   ~GenerationCounters() {
-    if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+    if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
   }
 
   virtual void update_all();
--- a/hotspot/src/share/vm/gc_implementation/shared/hSpaceCounters.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/hSpaceCounters.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -40,7 +40,7 @@
     const char* cns =
       PerfDataManager::name_space(gc->name_space(), "space", ordinal);
 
-    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1);
+    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
     strcpy(_name_space, cns);
 
     const char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/hotspot/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -37,7 +37,7 @@
 class HeapSpaceUsedHelper;
 class G1SpaceMonitoringSupport;
 
-class HSpaceCounters: public CHeapObj {
+class HSpaceCounters: public CHeapObj<mtGC> {
   friend class VMStructs;
 
  private:
@@ -55,7 +55,7 @@
                  size_t initial_capacity, GenerationCounters* gc);
 
   ~HSpaceCounters() {
-    if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+    if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
   }
 
   inline void update_capacity(size_t v) {
--- a/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -33,7 +33,7 @@
 // Invariant: bottom() and end() are on page_size boundaries and
 // bottom() <= end()
 
-class ImmutableSpace: public CHeapObj {
+class ImmutableSpace: public CHeapObj<mtGC> {
   friend class VMStructs;
  protected:
   HeapWord* _bottom;
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -30,13 +30,13 @@
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
 
-Stack<oop>              MarkSweep::_marking_stack;
-Stack<DataLayout*>      MarkSweep::_revisit_mdo_stack;
-Stack<Klass*>           MarkSweep::_revisit_klass_stack;
-Stack<ObjArrayTask>     MarkSweep::_objarray_stack;
+Stack<oop, mtGC>              MarkSweep::_marking_stack;
+Stack<DataLayout*, mtGC>      MarkSweep::_revisit_mdo_stack;
+Stack<Klass*, mtGC>           MarkSweep::_revisit_klass_stack;
+Stack<ObjArrayTask, mtGC>     MarkSweep::_objarray_stack;
 
-Stack<oop>              MarkSweep::_preserved_oop_stack;
-Stack<markOop>          MarkSweep::_preserved_mark_stack;
+Stack<oop, mtGC>              MarkSweep::_preserved_oop_stack;
+Stack<markOop, mtGC>          MarkSweep::_preserved_mark_stack;
 size_t                  MarkSweep::_preserved_count = 0;
 size_t                  MarkSweep::_preserved_count_max = 0;
 PreservedMark*          MarkSweep::_preserved_marks = NULL;
@@ -166,7 +166,7 @@
   }
 
   // deal with the overflow stack
-  StackIterator<oop> iter(_preserved_oop_stack);
+  StackIterator<oop, mtGC> iter(_preserved_oop_stack);
   while (!iter.is_empty()) {
     oop* p = iter.next_addr();
     adjust_pointer(p);
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -122,16 +122,16 @@
   //
  protected:
   // Traversal stacks used during phase1
-  static Stack<oop>                      _marking_stack;
-  static Stack<ObjArrayTask>             _objarray_stack;
+  static Stack<oop, mtGC>                      _marking_stack;
+  static Stack<ObjArrayTask, mtGC>             _objarray_stack;
   // Stack for live klasses to revisit at end of marking phase
-  static Stack<Klass*>                   _revisit_klass_stack;
+  static Stack<Klass*, mtGC>                   _revisit_klass_stack;
   // Set (stack) of MDO's to revisit at end of marking phase
-  static Stack<DataLayout*>              _revisit_mdo_stack;
+  static Stack<DataLayout*, mtGC>              _revisit_mdo_stack;
 
   // Space for storing/restoring mark word
-  static Stack<markOop>                  _preserved_mark_stack;
-  static Stack<oop>                      _preserved_oop_stack;
+  static Stack<markOop, mtGC>                  _preserved_mark_stack;
+  static Stack<oop, mtGC>                      _preserved_oop_stack;
   static size_t                          _preserved_count;
   static size_t                          _preserved_count_max;
   static PreservedMark*                  _preserved_marks;
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -43,7 +43,7 @@
 
 
 MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) {
-  _lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true);
+  _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true);
   _page_size = os::vm_page_size();
   _adaptation_cycles = 0;
   _samples_count = 0;
@@ -231,7 +231,7 @@
   if (force || changed) {
     // Compute lgrp intersection. Add/remove spaces.
     int lgrp_limit = (int)os::numa_get_groups_num();
-    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
+    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtGC);
     int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
     assert(lgrp_num > 0, "There should be at least one locality group");
     // Add new spaces for the new nodes
@@ -265,7 +265,7 @@
       }
     }
 
-    FREE_C_HEAP_ARRAY(int, lgrp_ids);
+    FREE_C_HEAP_ARRAY(int, lgrp_ids, mtGC);
 
     if (changed) {
       for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -63,7 +63,7 @@
 class MutableNUMASpace : public MutableSpace {
   friend class VMStructs;
 
-  class LGRPSpace : public CHeapObj {
+  class LGRPSpace : public CHeapObj<mtGC> {
     int _lgrp_id;
     MutableSpace* _space;
     MemRegion _invalid_region;
--- a/hotspot/src/share/vm/gc_implementation/shared/spaceCounters.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/spaceCounters.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -39,7 +39,7 @@
     const char* cns = PerfDataManager::name_space(gc->name_space(), "space",
                                                   ordinal);
 
-    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1);
+    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
     strcpy(_name_space, cns);
 
     const char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/hotspot/src/share/vm/gc_implementation/shared/spaceCounters.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/spaceCounters.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -35,7 +35,7 @@
 // A SpaceCounter is a holder class for performance counters
 // that track a space;
 
-class SpaceCounters: public CHeapObj {
+class SpaceCounters: public CHeapObj<mtGC> {
   friend class VMStructs;
 
  private:
@@ -55,7 +55,7 @@
                 MutableSpace* m, GenerationCounters* gc);
 
   ~SpaceCounters() {
-    if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+    if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
   }
 
   inline void update_capacity() {
--- a/hotspot/src/share/vm/gc_implementation/shared/spaceDecorator.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/spaceDecorator.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -70,7 +70,7 @@
 // These subclasses abstract the differences in the types of spaces used
 // by each heap.
 
-class SpaceMangler: public CHeapObj {
+class SpaceMangler: public CHeapObj<mtGC> {
   friend class VMStructs;
 
   // High water mark for allocations.  Typically, the space above
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -74,7 +74,7 @@
 //     G1CollectedHeap
 //   ParallelScavengeHeap
 //
-class CollectedHeap : public CHeapObj {
+class CollectedHeap : public CHeapObj<mtInternal> {
   friend class VMStructs;
   friend class IsGCActiveMark; // Block structured external access to _is_gc_active
   friend class constantPoolCacheKlass; // allocate() method inserts is_conc_safe
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -1118,8 +1118,8 @@
                                       SignatureHandlerLibrary::buffer_size);
   _buffer = bb->code_begin();
 
-  _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true);
-  _handlers     = new(ResourceObj::C_HEAP)GrowableArray<address>(32, true);
+  _fingerprints = new(ResourceObj::C_HEAP, mtCode)GrowableArray<uint64_t>(32, true);
+  _handlers     = new(ResourceObj::C_HEAP, mtCode)GrowableArray<address>(32, true);
 }
 
 address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) {
--- a/hotspot/src/share/vm/interpreter/oopMapCache.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/interpreter/oopMapCache.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -348,7 +348,7 @@
   if (mask_size() > small_mask_limit) {
     assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
     _bit_mask[0] = (intptr_t)
-      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size());
+      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
   }
 }
 
@@ -356,7 +356,7 @@
   if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
     assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
       "This bit mask should not be in the resource area");
-    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
+    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0], mtClass);
     debug_only(_bit_mask[0] = 0;)
   }
 }
@@ -506,7 +506,7 @@
 OopMapCache::OopMapCache() :
   _mut(Mutex::leaf, "An OopMapCache lock", true)
 {
-  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size);
+  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size, mtClass);
   // Cannot call flush for initialization, since flush
   // will check if memory should be deallocated
   for(int i = 0; i < _size; i++) _array[i].initialize();
@@ -520,7 +520,7 @@
   flush();
   // Deallocate array
   NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
-  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
+  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array, mtClass);
 }
 
 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
@@ -639,9 +639,9 @@
 
 void OopMapCache::compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry) {
   // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
-  OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1);
+  OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1, mtClass);
   tmp->initialize();
   tmp->fill(method, bci);
   entry->resource_copy(tmp);
-  FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp);
+  FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp, mtClass);
 }
--- a/hotspot/src/share/vm/interpreter/oopMapCache.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/interpreter/oopMapCache.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -156,7 +156,7 @@
 #endif
 };
 
-class OopMapCache : public CHeapObj {
+class OopMapCache : public CHeapObj<mtClass> {
  private:
   enum { _size        = 32,     // Use fixed size for now
          _probe_depth = 3       // probe depth in case of collisions
--- a/hotspot/src/share/vm/libadt/set.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/libadt/set.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -71,7 +71,7 @@
   set.Sort();                   // Sort elements for in-order retrieval
 
   uint len = 128;               // Total string space
-  char *buf = NEW_C_HEAP_ARRAY(char,len);// Some initial string space
+  char *buf = NEW_C_HEAP_ARRAY(char,len, mtCompiler);// Some initial string space
 
   register char *s = buf;       // Current working string pointer
   *s++ = '{';
@@ -86,7 +86,7 @@
       if( buf+len-s < 25 ) {      // Generous trailing space for upcoming numbers
         int offset = (int)(s-buf);// Not enuf space; compute offset into buffer
         len <<= 1;                // Double string size
-        buf = REALLOC_C_HEAP_ARRAY(char,buf,len); // Reallocate doubled size
+        buf = REALLOC_C_HEAP_ARRAY(char,buf,len, mtCompiler); // Reallocate doubled size
         s = buf+offset;         // Get working pointer into new bigger buffer
       }
       if( lo != (uint)-2 ) {    // Startup?  No!  Then print previous range.
@@ -101,7 +101,7 @@
     if( buf+len-s < 25 ) {      // Generous trailing space for upcoming numbers
       int offset = (int)(s-buf);// Not enuf space; compute offset into buffer
       len <<= 1;                // Double string size
-      buf = (char*)ReallocateHeap(buf,len); // Reallocate doubled size
+      buf = (char*)ReallocateHeap(buf,len, mtCompiler); // Reallocate doubled size
       s = buf+offset;           // Get working pointer into new bigger buffer
     }
     if( lo != hi ) sprintf(s,"%d-%d}",lo,hi);
--- a/hotspot/src/share/vm/libadt/vectset.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/libadt/vectset.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -362,7 +362,7 @@
 };
 
 SetI_ *VectorSet::iterate(uint &elem) const {
-  return new(ResourceObj::C_HEAP) VSetI_(this, elem);
+  return new(ResourceObj::C_HEAP, mtInternal) VSetI_(this, elem);
 }
 
 //=============================================================================
--- a/hotspot/src/share/vm/memory/allocation.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -26,10 +26,13 @@
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "runtime/task.hpp"
 #include "runtime/threadCritical.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/ostream.hpp"
+
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
 #endif
@@ -43,32 +46,16 @@
 # include "os_bsd.inline.hpp"
 #endif
 
-void* CHeapObj::operator new(size_t size){
-  return (void *) AllocateHeap(size, "CHeapObj-new");
-}
-
-void* CHeapObj::operator new (size_t size, const std::nothrow_t&  nothrow_constant) {
-  char* p = (char*) os::malloc(size);
-#ifdef ASSERT
-  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
-#endif
-  return p;
-}
-
-void CHeapObj::operator delete(void* p){
- FreeHeap(p);
-}
-
 void* StackObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; };
 void  StackObj::operator delete(void* p)   { ShouldNotCallThis(); };
 void* _ValueObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; };
 void  _ValueObj::operator delete(void* p)   { ShouldNotCallThis(); };
 
-void* ResourceObj::operator new(size_t size, allocation_type type) {
+void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
   address res;
   switch (type) {
    case C_HEAP:
-    res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
+    res = (address)AllocateHeap(size, flags, CALLER_PC);
     DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
     break;
    case RESOURCE_AREA:
@@ -184,7 +171,7 @@
 
 // MT-safe pool of chunks to reduce malloc/free thrashing
 // NB: not using Mutex because pools are used before Threads are initialized
-class ChunkPool {
+class ChunkPool: public CHeapObj<mtInternal> {
   Chunk*       _first;        // first cached Chunk; its first word points to next chunk
   size_t       _num_chunks;   // number of unused chunks in pool
   size_t       _num_used;     // number of chunks currently checked out
@@ -210,14 +197,16 @@
    ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
 
   // Allocate a new chunk from the pool (might expand the pool)
-  void* allocate(size_t bytes) {
+  _NOINLINE_ void* allocate(size_t bytes) {
     assert(bytes == _size, "bad size");
     void* p = NULL;
+    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
+    // should be done outside ThreadCritical lock due to NMT
     { ThreadCritical tc;
       _num_used++;
       p = get_first();
-      if (p == NULL) p = os::malloc(bytes);
     }
+    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
     if (p == NULL)
       vm_exit_out_of_memory(bytes, "ChunkPool::allocate");
 
@@ -238,28 +227,34 @@
 
   // Prune the pool
   void free_all_but(size_t n) {
+    Chunk* cur = NULL;
+    Chunk* next;
+    {
     // if we have more than n chunks, free all of them
     ThreadCritical tc;
     if (_num_chunks > n) {
       // free chunks at end of queue, for better locality
-      Chunk* cur = _first;
+        cur = _first;
       for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();
 
       if (cur != NULL) {
-        Chunk* next = cur->next();
+          next = cur->next();
         cur->set_next(NULL);
         cur = next;
 
-        // Free all remaining chunks
+          _num_chunks = n;
+        }
+      }
+    }
+
+    // Free all remaining chunks, outside of ThreadCritical
+    // to avoid deadlock with NMT
         while(cur != NULL) {
           next = cur->next();
-          os::free(cur);
-          _num_chunks--;
+      os::free(cur, mtChunk);
           cur = next;
         }
       }
-    }
-  }
 
   // Accessors to preallocated pool's
   static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
@@ -323,7 +318,7 @@
    case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
    case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
    default: {
-     void *p =  os::malloc(bytes);
+     void *p =  os::malloc(bytes, mtChunk, CALLER_PC);
      if (p == NULL)
        vm_exit_out_of_memory(bytes, "Chunk::new");
      return p;
@@ -337,7 +332,7 @@
    case Chunk::size:        ChunkPool::large_pool()->free(c); break;
    case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
    case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
-   default:                 os::free(c);
+   default:                 os::free(c, mtChunk);
   }
 }
 
@@ -374,6 +369,7 @@
 }
 
 //------------------------------Arena------------------------------------------
+NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
 
 Arena::Arena(size_t init_size) {
   size_t round_size = (sizeof (char *)) - 1;
@@ -382,6 +378,7 @@
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
   set_size_in_bytes(init_size);
+  NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }
 
 Arena::Arena() {
@@ -389,12 +386,15 @@
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
   set_size_in_bytes(Chunk::init_size);
+  NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }
 
 Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
   set_size_in_bytes(a->size_in_bytes());
+  NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }
 
+
 Arena *Arena::move_contents(Arena *copy) {
   copy->destruct_contents();
   copy->_chunk = _chunk;
@@ -409,6 +409,42 @@
 
 Arena::~Arena() {
   destruct_contents();
+  NOT_PRODUCT(Atomic::dec(&_instance_count);)
+}
+
+void* Arena::operator new(size_t size) {
+  assert(false, "Use dynamic memory type binding");
+  return NULL;
+}
+
+void* Arena::operator new (size_t size, const std::nothrow_t&  nothrow_constant) {
+  assert(false, "Use dynamic memory type binding");
+  return NULL;
+}
+
+  // dynamic memory type binding
+void* Arena::operator new(size_t size, MEMFLAGS flags) {
+#ifdef ASSERT
+  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
+  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
+  return p;
+#else
+  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
+#endif
+}
+
+void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
+#ifdef ASSERT
+  void* p = os::malloc(size, flags|otArena, CALLER_PC);
+  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
+  return p;
+#else
+  return os::malloc(size, flags|otArena, CALLER_PC);
+#endif
+}
+
+void Arena::operator delete(void* p) {
+  FreeHeap(p);
 }
 
 // Destroy this arenas contents and reset to empty
@@ -421,6 +457,14 @@
   reset();
 }
 
+// This is a high traffic method, but many calls actually don't
+// change the size
+void Arena::set_size_in_bytes(size_t size) {
+  if (_size_in_bytes != size) {
+    _size_in_bytes = size;
+    MemTracker::record_arena_size((address)this, size);
+  }
+}
 
 // Total of all Chunks in arena
 size_t Arena::used() const {
@@ -448,7 +492,6 @@
   if (_chunk == NULL) {
     signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
   }
-
   if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
   else _first = _chunk;
   _hwm  = _chunk->bottom();     // Save the cached hwm, max
@@ -538,7 +581,7 @@
   assert(UseMallocOnly, "shouldn't call");
   // use malloc, but save pointer in res. area for later freeing
   char** save = (char**)internal_malloc_4(sizeof(char*));
-  return (*save = (char*)os::malloc(size));
+  return (*save = (char*)os::malloc(size, mtChunk));
 }
 
 // for debugging with UseMallocOnly
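
The allocation.cpp changes above do two related things: ChunkPool now calls os::malloc()/os::free() outside its ThreadCritical section, since NMT bookkeeping must not run under that lock, and Arena gains placement operator new overloads that take a MEMFLAGS argument while the flag-less forms assert. A minimal sketch of an arena created under the new scheme (illustrative only; the call site below is hypothetical and not part of this changeset):

    // Sketch: dynamic memory type binding for an Arena.
    Arena* scratch = new (mtCompiler) Arena();  // Arena header allocated as mtCompiler|otArena
    void*  buf     = scratch->Amalloc(64);      // usual bump-pointer allocation from mtChunk chunks
    delete scratch;                             // routed to Arena::operator delete, i.e. FreeHeap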
--- a/hotspot/src/share/vm/memory/allocation.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,18 @@
 #define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
 #define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
 
+
+// noinline attribute
+#ifdef _WINDOWS
+  #define _NOINLINE_  __declspec(noinline)
+#else
+  #if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
+    #define _NOINLINE_
+  #else
+    #define _NOINLINE_ __attribute__ ((noinline))
+  #endif
+#endif
+
 // All classes in the virtual machine must be subclassed
 // by one of the following allocation classes:
 //
@@ -98,12 +110,72 @@
 };
 #endif
 
-class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
+
+/*
+ * MemoryType bitmap layout:
+ * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
+ * |      memory type        |   object    | reserved    |
+ * |                         |     type    |             |
+ */
+enum MemoryType {
+  // Memory type by sub systems. It occupies lower byte.
+  mtNone              = 0x0000,  // undefined
+  mtClass             = 0x0100,  // memory class for Java classes
+  mtThread            = 0x0200,  // memory for thread objects
+  mtThreadStack       = 0x0300,
+  mtCode              = 0x0400,  // memory for generated code
+  mtGC                = 0x0500,  // memory for GC
+  mtCompiler          = 0x0600,  // memory for compiler
+  mtInternal          = 0x0700,  // memory used by VM, but does not belong to
+                                 // any of above categories, and not used for
+                                 // native memory tracking
+  mtOther             = 0x0800,  // memory not used by VM
+  mtSymbol            = 0x0900,  // symbol
+  mtNMT               = 0x0A00,  // memory used by native memory tracking
+  mtChunk             = 0x0B00,  // chunk that holds content of arenas
+  mtJavaHeap          = 0x0C00,  // Java heap
+  mtDontTrack         = 0x0D00,  // memory we do not or cannot track
+  mt_number_of_types  = 0x000C,  // number of memory types
+  mt_masks            = 0x7F00,
+
+  // object type mask
+  otArena             = 0x0010, // an arena object
+  otNMTRecorder       = 0x0020, // memory recorder object
+  ot_masks            = 0x00F0
+};
+
+#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type)
+#define HAS_VALID_MEMORY_TYPE(flags)((flags & mt_masks) != mtNone)
+#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks)
+
+#define IS_ARENA_OBJ(flags)         ((flags & ot_masks) == otArena)
+#define IS_NMT_RECORDER(flags)      ((flags & ot_masks) == otNMTRecorder)
+#define NMT_CAN_TRACK(flags)        (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))
+
+typedef unsigned short MEMFLAGS;
+
+extern bool NMT_track_callsite;
+
+// debug build does not inline
+#if defined(_DEBUG_)
+  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
+  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(2) : 0)
+  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
+#else
+  #define CURRENT_PC      (NMT_track_callsite? os::get_caller_pc(0) : 0)
+  #define CALLER_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
+  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
+#endif
+
+
+
+template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  void* operator new(size_t size);
-  void* operator new (size_t size, const std::nothrow_t&  nothrow_constant);
+  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0);
+  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
+                               address caller_pc = 0);
+
   void  operator delete(void* p);
-  void* new_array(size_t size);
 };
 
 // Base class for objects allocated on the stack only.
@@ -150,7 +222,7 @@
 
 //------------------------------Chunk------------------------------------------
 // Linked list of raw memory chunks
-class Chunk: public CHeapObj {
+class Chunk: CHeapObj<mtChunk> {
   friend class VMStructs;
 
  protected:
@@ -197,7 +269,7 @@
 
 //------------------------------Arena------------------------------------------
 // Fast allocation of memory
-class Arena: public CHeapObj {
+class Arena : public CHeapObj<mtNone|otArena> {
 protected:
   friend class ResourceMark;
   friend class HandleMark;
@@ -208,7 +280,8 @@
   Chunk *_chunk;                // current chunk
   char *_hwm, *_max;            // High water mark and max in current chunk
   void* grow(size_t x);         // Get a new Chunk of at least size x
-  NOT_PRODUCT(size_t _size_in_bytes;) // Size of arena (used for memory usage tracing)
+  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)
+
   NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
   friend class AllocStats;
   debug_only(void* malloc(size_t size);)
@@ -231,6 +304,15 @@
   void  destruct_contents();
   char* hwm() const             { return _hwm; }
 
+  // new operators
+  void* operator new (size_t size);
+  void* operator new (size_t size, const std::nothrow_t& nothrow_constant);
+
+  // dynamic memory type tagging
+  void* operator new(size_t size, MEMFLAGS flags);
+  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags);
+  void  operator delete(void* p);
+
   // Fast allocate in the arena.  Common case is: pointer test + increment.
   void* Amalloc(size_t x) {
     assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
@@ -306,16 +388,20 @@
   size_t used() const;
 
   // Total # of bytes used
-  size_t size_in_bytes() const         NOT_PRODUCT({  return _size_in_bytes; }) PRODUCT_RETURN0;
-  void set_size_in_bytes(size_t size)  NOT_PRODUCT({ _size_in_bytes = size;  }) PRODUCT_RETURN;
+  size_t size_in_bytes() const         {  return _size_in_bytes; };
+  void set_size_in_bytes(size_t size);
+
   static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
   static void free_all(char** start, char** end)                                     PRODUCT_RETURN;
 
+  // how many arena instances
+  NOT_PRODUCT(static volatile jint _instance_count;)
 private:
   // Reset this Arena to empty, access will trigger grow if necessary
   void   reset(void) {
     _first = _chunk = NULL;
     _hwm = _max = NULL;
+    set_size_in_bytes(0);
   }
 };
 
@@ -373,7 +459,7 @@
 #endif // ASSERT
 
  public:
-  void* operator new(size_t size, allocation_type type);
+  void* operator new(size_t size, allocation_type type, MEMFLAGS flags);
   void* operator new(size_t size, Arena *arena) {
       address res = (address)arena->Amalloc(size);
       DEBUG_ONLY(set_allocation_type(res, ARENA);)
@@ -409,17 +495,28 @@
 #define NEW_RESOURCE_OBJ(type)\
   NEW_RESOURCE_ARRAY(type, 1)
 
-#define NEW_C_HEAP_ARRAY(type, size)\
-  (type*) (AllocateHeap((size) * sizeof(type), XSTR(type) " in " __FILE__))
+#define NEW_C_HEAP_ARRAY(type, size, memflags)\
+  (type*) (AllocateHeap((size) * sizeof(type), memflags))
 
-#define REALLOC_C_HEAP_ARRAY(type, old, size)\
-  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), XSTR(type) " in " __FILE__))
+#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
+  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))
+
+#define FREE_C_HEAP_ARRAY(type,old,memflags) \
+  FreeHeap((char*)(old), memflags)
 
-#define FREE_C_HEAP_ARRAY(type,old) \
-  FreeHeap((char*)(old))
+#define NEW_C_HEAP_OBJ(type, memflags)\
+  NEW_C_HEAP_ARRAY(type, 1, memflags)
+
+
+#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
+  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))
 
-#define NEW_C_HEAP_OBJ(type)\
-  NEW_C_HEAP_ARRAY(type, 1)
+#define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\
+  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc))
+
+#define NEW_C_HEAP_OBJ2(type, memflags, pc)\
+  NEW_C_HEAP_ARRAY2(type, 1, memflags, pc)
+
 
 extern bool warn_new_operator;
 
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -48,33 +48,60 @@
 #endif
 
 // allocate using malloc; will fail if no memory available
-inline char* AllocateHeap(size_t size, const char* name = NULL) {
-  char* p = (char*) os::malloc(size);
+inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0) {
+  if (pc == 0) {
+    pc = CURRENT_PC;
+  }
+  char* p = (char*) os::malloc(size, flags, pc);
   #ifdef ASSERT
-  if (PrintMallocFree) trace_heap_malloc(size, name, p);
-  #else
-  Unused_Variable(name);
+  if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
   #endif
-  if (p == NULL) vm_exit_out_of_memory(size, name);
+  if (p == NULL) vm_exit_out_of_memory(size, "AllocateHeap");
+  return p;
+}
+
+inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags) {
+  char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
+  #ifdef ASSERT
+  if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
+  #endif
+  if (p == NULL) vm_exit_out_of_memory(size, "ReallocateHeap");
   return p;
 }
 
-inline char* ReallocateHeap(char *old, size_t size, const char* name = NULL) {
-  char* p = (char*) os::realloc(old,size);
-  #ifdef ASSERT
-  if (PrintMallocFree) trace_heap_malloc(size, name, p);
-  #else
-  Unused_Variable(name);
-  #endif
-  if (p == NULL) vm_exit_out_of_memory(size, name);
-  return p;
-}
-
-inline void FreeHeap(void* p) {
+inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {
   #ifdef ASSERT
   if (PrintMallocFree) trace_heap_free(p);
   #endif
-  os::free(p);
+  os::free(p, memflags);
 }
 
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
+      address caller_pc){
+#ifdef ASSERT
+    void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
+    if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
+    return p;
+#else
+    return (void *) AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
+#endif
+  }
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
+  const std::nothrow_t&  nothrow_constant, address caller_pc) {
+#ifdef ASSERT
+    void* p = os::malloc(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
+    if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
+    return p;
+#else
+    return os::malloc(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
+#endif
+}
+
+template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
+   FreeHeap(p, F);
+}
+
+
 #endif // SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
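
The inline helpers now carry the flag end to end: AllocateHeap() and ReallocateHeap() record the caller PC when call-site tracking is enabled, and FreeHeap() defaults to mtInternal when no flag is given. Direct use looks like this (sketch only; the buffer below is hypothetical):

    // Sketch: flagged heap helpers.
    char* buf = AllocateHeap(1024, mtInternal);    // attributed to mtInternal
    buf = ReallocateHeap(buf, 2048, mtInternal);   // reallocation keeps the same type
    FreeHeap(buf, mtInternal);                     // flag should match the allocation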
--- a/hotspot/src/share/vm/memory/barrierSet.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/barrierSet.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -31,7 +31,7 @@
 // This class provides the interface between a barrier implementation and
 // the rest of the system.
 
-class BarrierSet: public CHeapObj {
+class BarrierSet: public CHeapObj<mtGC> {
   friend class VMStructs;
 public:
   enum Name {
--- a/hotspot/src/share/vm/memory/blockOffsetTable.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/blockOffsetTable.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -30,6 +30,7 @@
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
+#include "services/memTracker.hpp"
 
 //////////////////////////////////////////////////////////////////////
 // BlockOffsetSharedArray
@@ -44,6 +45,9 @@
   if (!rs.is_reserved()) {
     vm_exit_during_initialization("Could not reserve enough space for heap offset array");
   }
+
+  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+
   if (!_vs.initialize(rs, 0)) {
     vm_exit_during_initialization("Could not reserve enough space for heap offset array");
   }
--- a/hotspot/src/share/vm/memory/blockOffsetTable.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/blockOffsetTable.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -100,7 +100,7 @@
 //////////////////////////////////////////////////////////////////////////
 // BlockOffsetSharedArray
 //////////////////////////////////////////////////////////////////////////
-class BlockOffsetSharedArray: public CHeapObj {
+class BlockOffsetSharedArray: public CHeapObj<mtGC> {
   friend class BlockOffsetArray;
   friend class BlockOffsetArrayNonContigSpace;
   friend class BlockOffsetArrayContigSpace;
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -33,6 +33,7 @@
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/virtualspace.hpp"
+#include "services/memTracker.hpp"
 #ifdef COMPILER1
 #include "c1/c1_LIR.hpp"
 #include "c1/c1_LIRGenerator.hpp"
@@ -90,6 +91,9 @@
   const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
     MAX2(_page_size, (size_t) os::vm_allocation_granularity());
   ReservedSpace heap_rs(_byte_map_size, rs_align, false);
+
+  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
+
   os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                        _page_size, heap_rs.base(), heap_rs.size());
   if (!heap_rs.is_reserved()) {
@@ -113,16 +117,17 @@
     // Do better than this for Merlin
     vm_exit_out_of_memory(_page_size, "card table last card");
   }
+
   *guard_card = last_card;
 
    _lowest_non_clean =
-    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
+    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
   _lowest_non_clean_chunk_size =
-    NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
+    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
   _lowest_non_clean_base_chunk_index =
-    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
+    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
   _last_LNC_resizing_collection =
-    NEW_C_HEAP_ARRAY(int, max_covered_regions);
+    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
   if (_lowest_non_clean == NULL
       || _lowest_non_clean_chunk_size == NULL
       || _lowest_non_clean_base_chunk_index == NULL
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -56,7 +56,7 @@
 class PermanentGenerationSpec;
 class MarkSweepPolicy;
 
-class CollectorPolicy : public CHeapObj {
+class CollectorPolicy : public CHeapObj<mtGC> {
  protected:
   PermanentGenerationSpec *_permanent_generation;
   GCPolicyCounters* _gc_policy_counters;
--- a/hotspot/src/share/vm/memory/defNewGeneration.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -89,8 +89,8 @@
 
   // Together, these keep <object with a preserved mark, mark value> pairs.
   // They should always contain the same number of elements.
-  Stack<oop>     _objs_with_preserved_marks;
-  Stack<markOop> _preserved_marks_of_objs;
+  Stack<oop, mtGC>     _objs_with_preserved_marks;
+  Stack<markOop, mtGC> _preserved_marks_of_objs;
 
   // Promotion failure handling
   OopClosure *_promo_failure_scan_stack_closure;
@@ -98,7 +98,7 @@
     _promo_failure_scan_stack_closure = scan_stack_closure;
   }
 
-  Stack<oop> _promo_failure_scan_stack;
+  Stack<oop, mtGC> _promo_failure_scan_stack;
   void drain_promo_failure_scan_stack(void);
   bool _promo_failure_drain_in_progress;
 
--- a/hotspot/src/share/vm/memory/filemap.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/filemap.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -44,7 +44,7 @@
 
 
 
-class FileMapInfo : public CHeapObj {
+class FileMapInfo : public CHeapObj<mtInternal> {
 private:
   enum {
     _invalid_version = -1,
--- a/hotspot/src/share/vm/memory/freeBlockDictionary.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/freeBlockDictionary.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -34,7 +34,7 @@
 // A FreeBlockDictionary is an abstract superclass that will allow
 // a number of alternative implementations in the future.
 template <class Chunk>
-class FreeBlockDictionary: public CHeapObj {
+class FreeBlockDictionary: public CHeapObj<mtGC> {
  public:
   enum Dither {
     atLeast,
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -203,21 +203,21 @@
 
 #ifdef VALIDATE_MARK_SWEEP
   if (ValidateMarkSweep) {
-    _root_refs_stack    = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
-    _other_refs_stack   = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
-    _adjusted_pointers  = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
-    _live_oops          = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
-    _live_oops_moved_to = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
-    _live_oops_size     = new (ResourceObj::C_HEAP) GrowableArray<size_t>(100, true);
+    _root_refs_stack    = new (ResourceObj::C_HEAP, mtGC) GrowableArray<void*>(100, true);
+    _other_refs_stack   = new (ResourceObj::C_HEAP, mtGC) GrowableArray<void*>(100, true);
+    _adjusted_pointers  = new (ResourceObj::C_HEAP, mtGC) GrowableArray<void*>(100, true);
+    _live_oops          = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(100, true);
+    _live_oops_moved_to = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(100, true);
+    _live_oops_size     = new (ResourceObj::C_HEAP, mtGC) GrowableArray<size_t>(100, true);
   }
   if (RecordMarkSweepCompaction) {
     if (_cur_gc_live_oops == NULL) {
-      _cur_gc_live_oops           = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true);
-      _cur_gc_live_oops_moved_to  = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true);
-      _cur_gc_live_oops_size      = new(ResourceObj::C_HEAP) GrowableArray<size_t>(100, true);
-      _last_gc_live_oops          = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true);
-      _last_gc_live_oops_moved_to = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true);
-      _last_gc_live_oops_size     = new(ResourceObj::C_HEAP) GrowableArray<size_t>(100, true);
+      _cur_gc_live_oops           = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true);
+      _cur_gc_live_oops_moved_to  = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true);
+      _cur_gc_live_oops_size      = new(ResourceObj::C_HEAP, mtGC) GrowableArray<size_t>(100, true);
+      _last_gc_live_oops          = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true);
+      _last_gc_live_oops_moved_to = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true);
+      _last_gc_live_oops_size     = new(ResourceObj::C_HEAP, mtGC) GrowableArray<size_t>(100, true);
     } else {
       _cur_gc_live_oops->clear();
       _cur_gc_live_oops_moved_to->clear();
--- a/hotspot/src/share/vm/memory/genOopClosures.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/genOopClosures.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -34,10 +34,10 @@
 class CardTableModRefBS;
 class DefNewGeneration;
 
-template<class E, unsigned int N> class GenericTaskQueue;
-typedef GenericTaskQueue<oop, TASKQUEUE_SIZE> OopTaskQueue;
-template<class T> class GenericTaskQueueSet;
-typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
+template<class E, MEMFLAGS F, unsigned int N> class GenericTaskQueue;
+typedef GenericTaskQueue<oop, mtGC, TASKQUEUE_SIZE> OopTaskQueue;
+template<class T, MEMFLAGS F> class GenericTaskQueueSet;
+typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;
 
 // Closure for iterating roots from a particular generation
 // Note: all classes deriving from this MUST call this do_barrier
--- a/hotspot/src/share/vm/memory/genRemSet.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/genRemSet.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -35,7 +35,7 @@
 class OopsInGenClosure;
 class CardTableRS;
 
-class GenRemSet: public CHeapObj {
+class GenRemSet: public CHeapObj<mtGC> {
   friend class Generation;
 
   BarrierSet* _bs;
--- a/hotspot/src/share/vm/memory/generation.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/generation.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -86,7 +86,7 @@
 };
 
 
-class Generation: public CHeapObj {
+class Generation: public CHeapObj<mtGC> {
   friend class VMStructs;
  private:
   jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
--- a/hotspot/src/share/vm/memory/generationSpec.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/generationSpec.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -32,7 +32,7 @@
 // some generation-specific behavior.  This is done here rather than as a
 // virtual function of Generation because these methods are needed in
 // initialization of the Generations.
-class GenerationSpec : public CHeapObj {
+class GenerationSpec : public CHeapObj<mtGC> {
   friend class VMStructs;
 private:
   Generation::Name _name;
@@ -71,7 +71,7 @@
 // The specification of a permanent generation. This class is very
 // similar to GenerationSpec in use. Due to PermGen's not being a
 // true Generation, we cannot combine the spec classes either.
-class PermanentGenerationSpec : public CHeapObj {
+class PermanentGenerationSpec : public CHeapObj<mtGC> {
   friend class VMStructs;
 private:
   PermGen::Name    _name;
--- a/hotspot/src/share/vm/memory/heap.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/heap.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -26,7 +26,7 @@
 #include "memory/heap.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/os.hpp"
-
+#include "services/memTracker.hpp"
 
 size_t CodeHeap::header_size() {
   return sizeof(HeapBlock);
@@ -130,6 +130,9 @@
   if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) {
     return false;
   }
+
+  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);
+
   assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit  enough space for segment map");
   assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
   assert(_segmap.reserved_size()  >= _segmap.committed_size()     , "just checking");
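
Reserved virtual memory follows the same pattern: the owning subsystem registers the region's type once, right after reservation, as the code heap above and the card table and block offset array earlier in this change do. A sketch of the call sequence (sizes and variable names are hypothetical):

    // Sketch: typing a freshly reserved region for NMT.
    ReservedSpace rs(4 * M);                                              // reserve an address range
    MemTracker::record_virtual_memory_type((address)rs.base(), mtCode);  // attribute it to the code heap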
--- a/hotspot/src/share/vm/memory/heap.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/heap.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -77,7 +77,7 @@
   void set_link(FreeBlock* link)             { _link = link; }
 };
 
-class CodeHeap : public CHeapObj {
+class CodeHeap : public CHeapObj<mtCode> {
   friend class VMStructs;
  private:
   VirtualSpace _memory;                          // the memory holding the blocks
--- a/hotspot/src/share/vm/memory/heapInspection.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/heapInspection.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -116,7 +116,7 @@
 KlassInfoTable::KlassInfoTable(int size, HeapWord* ref) {
   _size = 0;
   _ref = ref;
-  _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size);
+  _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size, mtInternal);
   if (_buckets != NULL) {
     _size = size;
     for (int index = 0; index < _size; index++) {
@@ -130,7 +130,7 @@
     for (int index = 0; index < _size; index++) {
       _buckets[index].empty();
     }
-    FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets);
+    FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets, mtInternal);
     _size = 0;
   }
 }
@@ -179,7 +179,7 @@
 
 KlassInfoHisto::KlassInfoHisto(const char* title, int estimatedCount) :
   _title(title) {
-  _elements = new (ResourceObj::C_HEAP) GrowableArray<KlassInfoEntry*>(estimatedCount,true);
+  _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(estimatedCount,true);
 }
 
 KlassInfoHisto::~KlassInfoHisto() {
--- a/hotspot/src/share/vm/memory/heapInspection.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/heapInspection.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -44,7 +44,7 @@
 // to KlassInfoEntry's and is used to sort
 // the entries.
 
-class KlassInfoEntry: public CHeapObj {
+class KlassInfoEntry: public CHeapObj<mtInternal> {
  private:
   KlassInfoEntry* _next;
   klassOop        _klass;
@@ -72,7 +72,7 @@
   virtual void do_cinfo(KlassInfoEntry* cie) = 0;
 };
 
-class KlassInfoBucket: public CHeapObj {
+class KlassInfoBucket: public CHeapObj<mtInternal> {
  private:
   KlassInfoEntry* _list;
   KlassInfoEntry* list()           { return _list; }
--- a/hotspot/src/share/vm/memory/memRegion.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/memRegion.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -99,8 +99,8 @@
 
 class MemRegionClosureRO: public MemRegionClosure {
 public:
-  void* operator new(size_t size, ResourceObj::allocation_type type) {
-        return ResourceObj::operator new(size, type);
+  void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) {
+        return ResourceObj::operator new(size, type, flags);
   }
   void* operator new(size_t size, Arena *arena) {
         return ResourceObj::operator new(size, arena);
--- a/hotspot/src/share/vm/memory/permGen.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/permGen.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -42,7 +42,7 @@
 
 // PermGen models the part of the heap used to allocate class meta-data.
 
-class PermGen : public CHeapObj {
+class PermGen : public CHeapObj<mtGC> {
   friend class VMStructs;
  protected:
   size_t _capacity_expansion_limit;  // maximum expansion allowed without a
--- a/hotspot/src/share/vm/memory/referencePolicy.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/referencePolicy.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -29,7 +29,7 @@
 // should be cleared.
 
 
-class ReferencePolicy : public CHeapObj {
+class ReferencePolicy : public CHeapObj<mtGC> {
  public:
   virtual bool should_clear_reference(oop p, jlong timestamp_clock) {
     ShouldNotReachHere();
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -108,7 +108,8 @@
   _num_q               = MAX2(1U, mt_processing_degree);
   _max_num_q           = MAX2(_num_q, mt_discovery_degree);
   _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
-                                          _max_num_q * number_of_subclasses_of_ref());
+            _max_num_q * number_of_subclasses_of_ref(), mtGC);
+
   if (_discovered_refs == NULL) {
     vm_exit_during_initialization("Could not allocated RefProc Array");
   }
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -203,7 +203,7 @@
   }
 };
 
-class ReferenceProcessor : public CHeapObj {
+class ReferenceProcessor : public CHeapObj<mtGC> {
  protected:
   // Compatibility with pre-4965777 JDK's
   static bool _pending_list_uses_discovered_field;
--- a/hotspot/src/share/vm/memory/resourceArea.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/resourceArea.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@
     if (UseMallocOnly) {
       // use malloc, but save pointer in res. area for later freeing
       char** save = (char**)internal_malloc_4(sizeof(char*));
-      return (*save = (char*)os::malloc(size));
+      return (*save = (char*)os::malloc(size, mtThread));
     }
 #endif
     return (char*)Amalloc(size);
@@ -93,18 +93,17 @@
   ResourceArea *_area;          // Resource area to stack allocate
   Chunk *_chunk;                // saved arena chunk
   char *_hwm, *_max;
-  NOT_PRODUCT(size_t _size_in_bytes;)
+  size_t _size_in_bytes;
 
   void initialize(Thread *thread) {
     _area = thread->resource_area();
     _chunk = _area->_chunk;
     _hwm = _area->_hwm;
     _max= _area->_max;
-    NOT_PRODUCT(_size_in_bytes = _area->size_in_bytes();)
+    _size_in_bytes = _area->size_in_bytes();
     debug_only(_area->_nesting++;)
     assert( _area->_nesting > 0, "must stack allocate RMs" );
   }
-
  public:
 
 #ifndef ASSERT
@@ -120,7 +119,7 @@
 
   ResourceMark( ResourceArea *r ) :
     _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) {
-    NOT_PRODUCT(_size_in_bytes = _area->size_in_bytes();)
+    _size_in_bytes = r->_size_in_bytes;
     debug_only(_area->_nesting++;)
     assert( _area->_nesting > 0, "must stack allocate RMs" );
   }
@@ -148,7 +147,7 @@
 
  private:
   void free_malloced_objects()                                         PRODUCT_RETURN;
-  size_t size_in_bytes()       NOT_PRODUCT({ return _size_in_bytes; }) PRODUCT_RETURN0;
+  size_t size_in_bytes() { return _size_in_bytes; }
 };
 
 //------------------------------DeoptResourceMark-----------------------------------
@@ -180,19 +179,19 @@
 // and they would be stack allocated. This leaves open the possibilty of accidental
 // misuse so we simple duplicate the ResourceMark functionality here.
 
-class DeoptResourceMark: public CHeapObj {
+class DeoptResourceMark: public CHeapObj<mtInternal> {
 protected:
   ResourceArea *_area;          // Resource area to stack allocate
   Chunk *_chunk;                // saved arena chunk
   char *_hwm, *_max;
-  NOT_PRODUCT(size_t _size_in_bytes;)
+  size_t _size_in_bytes;
 
   void initialize(Thread *thread) {
     _area = thread->resource_area();
     _chunk = _area->_chunk;
     _hwm = _area->_hwm;
     _max= _area->_max;
-    NOT_PRODUCT(_size_in_bytes = _area->size_in_bytes();)
+    _size_in_bytes = _area->size_in_bytes();
     debug_only(_area->_nesting++;)
     assert( _area->_nesting > 0, "must stack allocate RMs" );
   }
@@ -212,7 +211,7 @@
 
   DeoptResourceMark( ResourceArea *r ) :
     _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) {
-    NOT_PRODUCT(_size_in_bytes = _area->size_in_bytes();)
+    _size_in_bytes = _area->size_in_bytes();
     debug_only(_area->_nesting++;)
     assert( _area->_nesting > 0, "must stack allocate RMs" );
   }
@@ -240,7 +239,7 @@
 
  private:
   void free_malloced_objects()                                         PRODUCT_RETURN;
-  size_t size_in_bytes()       NOT_PRODUCT({ return _size_in_bytes; }) PRODUCT_RETURN0;
+  size_t size_in_bytes() { return _size_in_bytes; };
 };
 
 #endif // SHARE_VM_MEMORY_RESOURCEAREA_HPP
--- a/hotspot/src/share/vm/memory/restore.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/restore.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -132,7 +132,7 @@
   buffer += sizeof(intptr_t);
   int number_of_entries = *(intptr_t*)buffer;
   buffer += sizeof(intptr_t);
-  SymbolTable::create_table((HashtableBucket*)buffer, symbolTableLen,
+  SymbolTable::create_table((HashtableBucket<mtSymbol>*)buffer, symbolTableLen,
                             number_of_entries);
   buffer += symbolTableLen;
 
@@ -144,7 +144,7 @@
   buffer += sizeof(intptr_t);
   number_of_entries = *(intptr_t*)buffer;
   buffer += sizeof(intptr_t);
-  StringTable::create_table((HashtableBucket*)buffer, stringTableLen,
+  StringTable::create_table((HashtableBucket<mtSymbol>*)buffer, stringTableLen,
                             number_of_entries);
   buffer += stringTableLen;
 
@@ -157,7 +157,7 @@
   buffer += sizeof(intptr_t);
   number_of_entries = *(intptr_t*)buffer;
   buffer += sizeof(intptr_t);
-  SystemDictionary::set_shared_dictionary((HashtableBucket*)buffer,
+  SystemDictionary::set_shared_dictionary((HashtableBucket<mtClass>*)buffer,
                                           sharedDictionaryLen,
                                           number_of_entries);
   buffer += sharedDictionaryLen;
@@ -171,7 +171,7 @@
   buffer += sizeof(intptr_t);
   number_of_entries = *(intptr_t*)buffer;
   buffer += sizeof(intptr_t);
-  ClassLoader::create_package_info_table((HashtableBucket*)buffer, pkgInfoLen,
+  ClassLoader::create_package_info_table((HashtableBucket<mtClass>*)buffer, pkgInfoLen,
                                          number_of_entries);
   buffer += pkgInfoLen;
   ClassLoader::verify();
--- a/hotspot/src/share/vm/memory/space.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/space.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -105,7 +105,7 @@
 // bottom() <= top() <= end()
 // top() is inclusive and end() is exclusive.
 
-class Space: public CHeapObj {
+class Space: public CHeapObj<mtGC> {
   friend class VMStructs;
  protected:
   HeapWord* _bottom;
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -65,7 +65,7 @@
   if (UseParNewGC && ParallelGCThreads > 0) {
     typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
     _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
-                                      ParallelGCThreads);
+                                      ParallelGCThreads, mtGC);
     if (_alloc_buffers == NULL)
       vm_exit_during_initialization("Could not allocate alloc_buffers");
     for (uint i = 0; i < ParallelGCThreads; i++) {
--- a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -36,7 +36,7 @@
 //            It is thread-private at any time, but maybe multiplexed over
 //            time across multiple threads. The park()/unpark() pair is
 //            used to make it avaiable for such multiplexing.
-class ThreadLocalAllocBuffer: public CHeapObj {
+class ThreadLocalAllocBuffer: public CHeapObj<mtThread> {
   friend class VMStructs;
 private:
   HeapWord* _start;                              // address of TLAB
@@ -172,7 +172,7 @@
   void verify();
 };
 
-class GlobalTLABStats: public CHeapObj {
+class GlobalTLABStats: public CHeapObj<mtThread> {
 private:
 
   // Accumulate perfdata in private variables because
--- a/hotspot/src/share/vm/memory/universe.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/universe.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -764,7 +764,7 @@
 
   FileMapInfo* mapinfo = NULL;
   if (UseSharedSpaces) {
-    mapinfo = NEW_C_HEAP_OBJ(FileMapInfo);
+    mapinfo = NEW_C_HEAP_OBJ(FileMapInfo, mtInternal);
     memset(mapinfo, 0, sizeof(FileMapInfo));
 
     // Open the shared archive file, read and validate the header. If
@@ -1546,7 +1546,7 @@
     // This is the first previous version so make some space.
     // Start with 2 elements under the assumption that the class
     // won't be redefined much.
-    _prev_methods = new (ResourceObj::C_HEAP) GrowableArray<jweak>(2, true);
+    _prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray<jweak>(2, true);
   }
 
   // RC_TRACE macro has an embedded ResourceMark
--- a/hotspot/src/share/vm/memory/universe.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/memory/universe.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -43,7 +43,7 @@
 // Common parts of a methodOop cache. This cache safely interacts with
 // the RedefineClasses API.
 //
-class CommonMethodOopCache : public CHeapObj {
+class CommonMethodOopCache : public CHeapObj<mtClass> {
   // We save the klassOop and the idnum of methodOop in order to get
   // the current cached methodOop.
  private:
@@ -455,7 +455,7 @@
   static int base_vtable_size()               { return _base_vtable_size; }
 };
 
-class DeferredObjAllocEvent : public CHeapObj {
+class DeferredObjAllocEvent : public CHeapObj<mtInternal> {
   private:
     oop    _oop;
     size_t _bytesize;
--- a/hotspot/src/share/vm/oops/constantPoolOop.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/oops/constantPoolOop.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -764,7 +764,7 @@
                         unsigned char *bytes);
 };
 
-class SymbolHashMapEntry : public CHeapObj {
+class SymbolHashMapEntry : public CHeapObj<mtSymbol> {
  private:
   unsigned int        _hash;   // 32-bit hash for item
   SymbolHashMapEntry* _next;   // Next element in the linked list for this bucket
@@ -790,7 +790,7 @@
 }; // End SymbolHashMapEntry class
 
 
-class SymbolHashMapBucket : public CHeapObj {
+class SymbolHashMapBucket : public CHeapObj<mtSymbol> {
 
 private:
   SymbolHashMapEntry*    _entry;
@@ -803,7 +803,7 @@
 }; // End SymbolHashMapBucket class
 
 
-class SymbolHashMap: public CHeapObj {
+class SymbolHashMap: public CHeapObj<mtSymbol> {
 
  private:
   // Default number of entries in the table
@@ -816,7 +816,7 @@
 
   void initialize_table(int table_size) {
     _table_size = table_size;
-    _buckets = NEW_C_HEAP_ARRAY(SymbolHashMapBucket, table_size);
+    _buckets = NEW_C_HEAP_ARRAY(SymbolHashMapBucket, table_size, mtSymbol);
     for (int index = 0; index < table_size; index++) {
       _buckets[index].clear();
     }
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -989,7 +989,7 @@
   fieldDescriptor fd;
   int length = java_fields_count();
   // In DebugInfo nonstatic fields are sorted by offset.
-  int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
+  int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1), mtClass);
   int j = 0;
   for (int i = 0; i < length; i += 1) {
     fd.initialize(as_klassOop(), i);
@@ -1009,7 +1009,7 @@
       cl->do_field(&fd);
     }
   }
-  FREE_C_HEAP_ARRAY(int, fields_sorted);
+  FREE_C_HEAP_ARRAY(int, fields_sorted, mtClass);
 }
 
 
@@ -1236,7 +1236,7 @@
     if (length <= idnum) {
       // allocate a new cache that might be used
       size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
-      new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
+      new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1, mtClass);
       memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
       // cache size is stored in element[0], other elements offset by one
       new_jmeths[0] = (jmethodID)size;
@@ -1397,7 +1397,7 @@
     // cache size is stored in element[0], other elements offset by one
     if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
       size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
-      int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
+      int* new_indices = NEW_C_HEAP_ARRAY(int, size+1, mtClass);
       new_indices[0] = (int)size;
       // copy any existing entries
       size_t i;
@@ -1933,7 +1933,7 @@
 
   // deallocate the cached class file
   if (_cached_class_file_bytes != NULL) {
-    os::free(_cached_class_file_bytes);
+    os::free(_cached_class_file_bytes, mtClass);
     _cached_class_file_bytes = NULL;
     _cached_class_file_len = 0;
   }
@@ -2530,7 +2530,7 @@
     // This is the first previous version so make some space.
     // Start with 2 elements under the assumption that the class
     // won't be redefined much.
-    _previous_versions =  new (ResourceObj::C_HEAP)
+    _previous_versions =  new (ResourceObj::C_HEAP, mtClass)
                             GrowableArray<PreviousVersionNode *>(2, true);
   }
 
@@ -2556,7 +2556,7 @@
       ("add: all methods are obsolete; flushing any EMCP weak refs"));
   } else {
     int local_count = 0;
-    GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP)
+    GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP, mtClass)
       GrowableArray<jweak>(emcp_method_count, true);
     for (int i = 0; i < old_methods->length(); i++) {
       if (emcp_methods->at(i)) {
@@ -2948,7 +2948,7 @@
 
   while (_current_index < length) {
     PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
-    PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP)
+    PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass)
                                           PreviousVersionInfo(pv_node);
 
     constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
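
The GrowableArray allocations above follow the same scheme: the ResourceObj
placement new that puts the array header on the C heap now also names the
memory type. A minimal sketch of the idiom (illustrative, not part of the
changeset):

    // Sketch: a C-heap GrowableArray whose storage is tagged mtClass, as in
    // the _previous_versions and method_refs changes above.
    GrowableArray<jweak>* refs =
        new (ResourceObj::C_HEAP, mtClass) GrowableArray<jweak>(2, true);
    // the trailing 'true' asks for C-heap backing storage as well, so the
    // contents survive beyond the current ResourceMark
    jweak ref = NULL;           // stand-in for a real weak reference
    refs->append(ref);
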
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -1008,7 +1008,7 @@
 
 
 /* JNIid class for jfieldIDs only */
-class JNIid: public CHeapObj {
+class JNIid: public CHeapObj<mtClass> {
   friend class VMStructs;
  private:
   klassOop           _holder;
@@ -1059,7 +1059,7 @@
 // reference must be used because a weak reference would be seen as
 // collectible. A GrowableArray of PreviousVersionNodes is attached
 // to the instanceKlass as needed. See PreviousVersionWalker below.
-class PreviousVersionNode : public CHeapObj {
+class PreviousVersionNode : public CHeapObj<mtClass> {
  private:
   // A shared ConstantPool is never collected so we'll always have
   // a reference to it so we can update items in the cache. We'll
@@ -1154,7 +1154,7 @@
 // noticed since an nmethod should be removed as many times as it's
 // added.
 //
-class nmethodBucket: public CHeapObj {
+class nmethodBucket: public CHeapObj<mtClass> {
   friend class VMStructs;
  private:
   nmethod*       _nmethod;
--- a/hotspot/src/share/vm/oops/methodOop.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/oops/methodOop.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -801,7 +801,7 @@
 // breakpoints are written only at safepoints, and are read
 // concurrently only outside of safepoints.
 
-class BreakpointInfo : public CHeapObj {
+class BreakpointInfo : public CHeapObj<mtClass> {
   friend class VMStructs;
  private:
   Bytecodes::Code  _orig_bytecode;
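
Classes such as BreakpointInfo above move from the old untyped CHeapObj base
class to the CHeapObj<MEMFLAGS> template, so allocations made through their
operator new are charged to the named category. A hedged sketch of declaring
and using such a class (the class name here is hypothetical, not from the
changeset):

    // Sketch: a C-heap-allocated VM object picks its category in the class
    // definition; 'new' then attributes the allocation to that category.
    class BreakpointLog : public CHeapObj<mtClass> {   // hypothetical class
     public:
      int _count;
      BreakpointLog() : _count(0) {}
    };

    BreakpointLog* log = new BreakpointLog();   // accounted under mtClass
    delete log;
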
--- a/hotspot/src/share/vm/oops/symbol.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/oops/symbol.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -38,7 +38,7 @@
 
 void* Symbol::operator new(size_t sz, int len, TRAPS) {
   int alloc_size = object_size(len)*HeapWordSize;
-  address res = (address) AllocateHeap(alloc_size, "symbol");
+  address res = (address) AllocateHeap(alloc_size, mtSymbol);
   DEBUG_ONLY(set_allocation_type(res, ResourceObj::C_HEAP);)
   return res;
 }
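
AllocateHeap used to take a free-form string label ("symbol" here); it now
takes the MEMFLAGS value instead, and FreeHeap is tagged the same way. A
minimal sketch of the new pairing (illustrative only):

    // Sketch: raw C-heap bytes attributed to the symbol subsystem.
    char* buf = AllocateHeap(128, mtSymbol);
    FreeHeap(buf, mtSymbol);
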
--- a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -130,15 +130,15 @@
       } else {
         st.print("%s%d", PrintIdealGraphFile, _file_count);
       }
-      fileStream *stream = new (ResourceObj::C_HEAP) fileStream(st.as_string());
+      fileStream *stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(st.as_string());
       _output = stream;
     } else {
-      fileStream *stream = new (ResourceObj::C_HEAP) fileStream(PrintIdealGraphFile);
+      fileStream *stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(PrintIdealGraphFile);
       _output = stream;
     }
     _file_count++;
   } else {
-    _stream = new (ResourceObj::C_HEAP) networkStream();
+    _stream = new (ResourceObj::C_HEAP, mtCompiler) networkStream();
 
     // Try to connect to visualizer
     if (_stream->connect(PrintIdealGraphAddress, PrintIdealGraphPort)) {
@@ -160,7 +160,7 @@
     }
   }
 
-  _xml = new (ResourceObj::C_HEAP) xmlStream(_output);
+  _xml = new (ResourceObj::C_HEAP, mtCompiler) xmlStream(_output);
 
   head(TOP_ELEMENT);
 }
--- a/hotspot/src/share/vm/opto/macro.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/opto/macro.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -409,7 +409,7 @@
   Node *alloc_mem = alloc->in(TypeFunc::Memory);
 
   uint length = mem->req();
-  GrowableArray <Node *> values(length, length, NULL);
+  GrowableArray <Node *> values(length, length, NULL, false);
 
   // create a new Phi for the value
   PhiNode *phi = new (C, length) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset);
--- a/hotspot/src/share/vm/opto/runtime.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/opto/runtime.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -55,7 +55,7 @@
 // code in various ways.  Currently they are used by the lock coarsening code
 //
 
-class NamedCounter : public CHeapObj {
+class NamedCounter : public CHeapObj<mtCompiler> {
 public:
     enum CounterTag {
     NoTag,
--- a/hotspot/src/share/vm/opto/type.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/opto/type.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -208,7 +208,7 @@
   // locking.
 
   Arena* save = current->type_arena();
-  Arena* shared_type_arena = new Arena();
+  Arena* shared_type_arena = new (mtCompiler)Arena();
 
   current->set_type_arena(shared_type_arena);
   _shared_type_dict =
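
Arenas are tagged the same way: the placement-new argument (mtCompiler above)
records which subsystem owns the chunks the arena hands out. A short sketch
(illustrative, not from the changeset):

    // Sketch: a compiler-owned arena; its chunks are charged to mtCompiler.
    Arena* scratch = new (mtCompiler) Arena();
    void* tmp = scratch->Amalloc(64);   // ordinary arena allocation, unchanged
    delete scratch;                     // releases the arena's chunks
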
--- a/hotspot/src/share/vm/prims/jni.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jni.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -33,6 +33,7 @@
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #endif // SERIALGC
+#include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/oopFactory.hpp"
@@ -3270,7 +3271,7 @@
   int s_len = java_lang_String::length(s);
   typeArrayOop s_value = java_lang_String::value(s);
   int s_offset = java_lang_String::offset(s);
-  jchar* buf = NEW_C_HEAP_ARRAY(jchar, s_len + 1);  // add one for zero termination
+  jchar* buf = NEW_C_HEAP_ARRAY(jchar, s_len + 1, mtInternal);  // add one for zero termination
   if (s_len > 0) {
     memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
   }
@@ -3363,7 +3364,7 @@
 #endif /* USDT2 */
   oop java_string = JNIHandles::resolve_non_null(string);
   size_t length = java_lang_String::utf8_length(java_string);
-  char* result = AllocateHeap(length + 1, "GetStringUTFChars");
+  char* result = AllocateHeap(length + 1, mtInternal);
   java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
   if (isCopy != NULL) *isCopy = JNI_TRUE;
 #ifndef USDT2
@@ -3619,7 +3620,7 @@
      * Avoid asserts in typeArrayOop. */ \
     result = (ElementType*)get_bad_address(); \
   } else { \
-    result = NEW_C_HEAP_ARRAY(ElementType, len); \
+    result = NEW_C_HEAP_ARRAY(ElementType, len, mtInternal); \
     /* copy the array to the c chunk */ \
     memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \
   } \
@@ -3656,7 +3657,7 @@
      * Avoid asserts in typeArrayOop. */ \
     result = (ElementType*)get_bad_address(); \
   } else { \
-    result = NEW_C_HEAP_ARRAY(ElementType, len); \
+    result = NEW_C_HEAP_ARRAY(ElementType, len, mtInternal); \
     /* copy the array to the c chunk */ \
     memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \
   } \
--- a/hotspot/src/share/vm/prims/jniCheck.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jniCheck.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -1308,7 +1308,7 @@
     assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringChars didn't return a copy as expected");
 
     size_t len = UNCHECKED()->GetStringLength(env,str) + 1; // + 1 for NULL termination
-    jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), "checked_jni_GetStringChars");
+    jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), mtInternal);
     *tagLocation = STRING_TAG;
     jchar* newResult = (jchar*) (tagLocation + 1);
     memcpy(newResult, result, len * sizeof(jchar));
@@ -1378,13 +1378,13 @@
     assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringUTFChars didn't return a copy as expected");
 
     size_t len = strlen(result) + 1; // + 1 for NULL termination
-    jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), "checked_jni_GetStringUTFChars");
+    jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), mtInternal);
     *tagLocation = STRING_UTF_TAG;
     char* newResult = (char*) (tagLocation + 1);
     strcpy(newResult, result);
     // Avoiding call to UNCHECKED()->ReleaseStringUTFChars() since that will fire unexpected dtrace probes
     // Note that the dtrace arguments for the allocated memory will not match up with this solution.
-    FreeHeap((char*)result);
+    FreeHeap((char*)result, mtInternal);
 
     functionExit(env);
     return newResult;
--- a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -68,11 +68,11 @@
 
   ~JvmtiConstantPoolReconstituter() {
     if (_symmap != NULL) {
-      os::free(_symmap);
+      os::free(_symmap, mtClass);
       _symmap = NULL;
     }
     if (_classmap != NULL) {
-      os::free(_classmap);
+      os::free(_classmap, mtClass);
       _classmap = NULL;
     }
   }
--- a/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -157,7 +157,7 @@
   assert(_global_code_blobs == NULL, "checking");
 
   // create the global list
-  _global_code_blobs = new (ResourceObj::C_HEAP) GrowableArray<JvmtiCodeBlobDesc*>(50,true);
+  _global_code_blobs = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiCodeBlobDesc*>(50,true);
 
   // iterate over the stub code descriptors and put them in the list first.
   int index = 0;
@@ -247,7 +247,7 @@
     int pcds_in_method;
 
     pcds_in_method = (nm->scopes_pcs_end() - nm->scopes_pcs_begin());
-    map = NEW_C_HEAP_ARRAY(jvmtiAddrLocationMap, pcds_in_method);
+    map = NEW_C_HEAP_ARRAY(jvmtiAddrLocationMap, pcds_in_method, mtInternal);
 
     address scopes_data = nm->scopes_data_begin();
     for( pcd = nm->scopes_pcs_begin(); pcd < nm->scopes_pcs_end(); ++pcd ) {
--- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -1012,7 +1012,7 @@
 
   // growable array of jvmti monitors info on the C-heap
   GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list =
-      new (ResourceObj::C_HEAP) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true);
+      new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true);
 
   uint32_t debug_bits = 0;
   if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
@@ -1057,7 +1057,7 @@
 
   // growable array of jvmti monitors info on the C-heap
   GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list =
-         new (ResourceObj::C_HEAP) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true);
+         new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true);
 
   uint32_t debug_bits = 0;
   if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
--- a/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -381,7 +381,7 @@
     _native_method_prefixes = NULL;
   } else {
     // there are prefixes, allocate an array to hold them, and fill it
-    char** new_prefixes = (char**)os::malloc((prefix_count) * sizeof(char*));
+    char** new_prefixes = (char**)os::malloc((prefix_count) * sizeof(char*), mtInternal);
     if (new_prefixes == NULL) {
       return JVMTI_ERROR_OUT_OF_MEMORY;
     }
@@ -1150,7 +1150,7 @@
 
 ResourceTracker::ResourceTracker(JvmtiEnv* env) {
   _env = env;
-  _allocations = new (ResourceObj::C_HEAP) GrowableArray<unsigned char*>(20, true);
+  _allocations = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<unsigned char*>(20, true);
   _failed = false;
 }
 ResourceTracker::~ResourceTracker() {
--- a/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -52,7 +52,7 @@
 // done via JNI GetEnv() call. Multiple attachments are
 // allowed in jvmti.
 
-class JvmtiEnvBase : public CHeapObj {
+class JvmtiEnvBase : public CHeapObj<mtInternal> {
 
  private:
 
@@ -175,7 +175,7 @@
     if (size == 0) {
       *mem_ptr = NULL;
     } else {
-      *mem_ptr = (unsigned char *)os::malloc((size_t)size);
+      *mem_ptr = (unsigned char *)os::malloc((size_t)size, mtInternal);
       if (*mem_ptr == NULL) {
         return JVMTI_ERROR_OUT_OF_MEMORY;
       }
@@ -185,7 +185,7 @@
 
   jvmtiError deallocate(unsigned char* mem) {
     if (mem != NULL) {
-      os::free(mem);
+      os::free(mem, mtInternal);
     }
     return JVMTI_ERROR_NONE;
   }
--- a/hotspot/src/share/vm/prims/jvmtiEnvThreadState.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiEnvThreadState.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -95,7 +95,7 @@
 //
 
 JvmtiFramePops::JvmtiFramePops() {
-  _pops = new (ResourceObj::C_HEAP) GrowableArray<int> (2, true);
+  _pops = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int> (2, true);
 }
 
 JvmtiFramePops::~JvmtiFramePops() {
--- a/hotspot/src/share/vm/prims/jvmtiEnvThreadState.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiEnvThreadState.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -76,7 +76,7 @@
 // It records what frames on a thread's stack should post frame_pop events when they're exited.
 //
 
-class JvmtiFramePops : public CHeapObj {
+class JvmtiFramePops : public CHeapObj<mtInternal> {
  private:
   GrowableArray<int>* _pops;
 
@@ -107,7 +107,7 @@
 // 3: Location of last executed instruction, used to filter out duplicate
 //    events due to instruction rewriting.
 
-class JvmtiEnvThreadState : public CHeapObj {
+class JvmtiEnvThreadState : public CHeapObj<mtInternal> {
 private:
   friend class JvmtiEnv;
   JavaThread        *_thread;
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -617,7 +617,7 @@
       if (caching_needed && *_cached_data_ptr == NULL) {
         // data has been changed by the new retransformable agent
         // and it hasn't already been cached, cache it
-        *_cached_data_ptr = (unsigned char *)os::malloc(_curr_len);
+        *_cached_data_ptr = (unsigned char *)os::malloc(_curr_len, mtInternal);
         memcpy(*_cached_data_ptr, _curr_data, _curr_len);
         *_cached_length_ptr = _curr_len;
       }
@@ -720,7 +720,7 @@
     JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &_map, &_map_length);
   }
   ~JvmtiCompiledMethodLoadEventMark() {
-     FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, _map);
+     FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, _map, mtInternal);
   }
 
   jint code_size() { return _code_size; }
@@ -2323,7 +2323,7 @@
 // register a stub
 void JvmtiDynamicCodeEventCollector::register_stub(const char* name, address start, address end) {
  if (_code_blobs == NULL) {
-   _code_blobs = new (ResourceObj::C_HEAP) GrowableArray<JvmtiCodeBlobDesc*>(1,true);
+   _code_blobs = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiCodeBlobDesc*>(1,true);
  }
  _code_blobs->append(new JvmtiCodeBlobDesc(name, start, end));
 }
@@ -2357,7 +2357,7 @@
 void JvmtiVMObjectAllocEventCollector::record_allocation(oop obj) {
   assert(is_enabled(), "VM object alloc event collector is not enabled");
   if (_allocated == NULL) {
-    _allocated = new (ResourceObj::C_HEAP) GrowableArray<oop>(1, true);
+    _allocated = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(1, true);
   }
   _allocated->push(obj);
 }
--- a/hotspot/src/share/vm/prims/jvmtiExport.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiExport.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -350,7 +350,7 @@
 
 // Support class used by JvmtiDynamicCodeEventCollector and others. It
 // describes a single code blob by name and address range.
-class JvmtiCodeBlobDesc : public CHeapObj {
+class JvmtiCodeBlobDesc : public CHeapObj<mtInternal> {
  private:
   char _name[64];
   address _code_begin;
--- a/hotspot/src/share/vm/prims/jvmtiExtensions.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiExtensions.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -49,8 +49,8 @@
 // event. The function and the event are registered here.
 //
 void JvmtiExtensions::register_extensions() {
-  _ext_functions = new (ResourceObj::C_HEAP) GrowableArray<jvmtiExtensionFunctionInfo*>(1,true);
-  _ext_events = new (ResourceObj::C_HEAP) GrowableArray<jvmtiExtensionEventInfo*>(1,true);
+  _ext_functions = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiExtensionFunctionInfo*>(1,true);
+  _ext_events = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiExtensionEventInfo*>(1,true);
 
   // register our extension function
   static jvmtiParamInfo func_params[] = {
--- a/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -152,7 +152,7 @@
 
   // Public methods that get called within the scope of the closure
   void allocate() {
-    _list = NEW_C_HEAP_ARRAY(Handle, _count);
+    _list = NEW_C_HEAP_ARRAY(Handle, _count, mtInternal);
     assert(_list != NULL, "Out of memory");
     if (_list == NULL) {
       _count = 0;
--- a/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -98,8 +98,8 @@
 void GrowableCache::recache() {
   int len = _elements->length();
 
-  FREE_C_HEAP_ARRAY(address, _cache);
-  _cache = NEW_C_HEAP_ARRAY(address,len+1);
+  FREE_C_HEAP_ARRAY(address, _cache, mtInternal);
+  _cache = NEW_C_HEAP_ARRAY(address,len+1, mtInternal);
 
   for (int i=0; i<len; i++) {
     _cache[i] = _elements->at(i)->getCacheValue();
@@ -142,13 +142,13 @@
 GrowableCache::~GrowableCache() {
   clear();
   delete _elements;
-  FREE_C_HEAP_ARRAY(address, _cache);
+  FREE_C_HEAP_ARRAY(address, _cache, mtInternal);
 }
 
 void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) {
   _this_obj       = this_obj;
   _listener_fun   = listener_fun;
-  _elements       = new (ResourceObj::C_HEAP) GrowableArray<GrowableElement*>(5,true);
+  _elements       = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<GrowableElement*>(5,true);
   recache();
 }
 
--- a/hotspot/src/share/vm/prims/jvmtiImpl.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -64,7 +64,7 @@
 // to update its pointer to the address cache.
 //
 
-class GrowableElement : public CHeapObj {
+class GrowableElement : public CHeapObj<mtInternal> {
 public:
   virtual address getCacheValue()          =0;
   virtual bool equals(GrowableElement* e)  =0;
@@ -130,7 +130,7 @@
 // Note   : typesafe wrapper for GrowableCache of JvmtiBreakpoint
 //
 
-class JvmtiBreakpointCache : public CHeapObj {
+class JvmtiBreakpointCache : public CHeapObj<mtInternal> {
 
 private:
   GrowableCache _cache;
@@ -258,7 +258,7 @@
 // CHeap allocated to emphasize its similarity to JvmtiFramePops.
 //
 
-class JvmtiBreakpoints : public CHeapObj {
+class JvmtiBreakpoints : public CHeapObj<mtInternal> {
 private:
 
   JvmtiBreakpointCache _bps;
@@ -496,7 +496,7 @@
 class JvmtiDeferredEventQueue : AllStatic {
   friend class JvmtiDeferredEvent;
  private:
-  class QueueNode : public CHeapObj {
+  class QueueNode : public CHeapObj<mtInternal> {
    private:
     JvmtiDeferredEvent _event;
     QueueNode* _next;
--- a/hotspot/src/share/vm/prims/jvmtiRawMonitor.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiRawMonitor.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -27,7 +27,7 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/thread.hpp"
 
-GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP) GrowableArray<JvmtiRawMonitor*>(1,true);
+GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiRawMonitor*>(1,true);
 
 void JvmtiPendingMonitors::transition_raw_monitors() {
   assert((Threads::number_of_threads()==1),
@@ -53,7 +53,7 @@
 
 JvmtiRawMonitor::JvmtiRawMonitor(const char *name) {
 #ifdef ASSERT
-  _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name);
+  _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtInternal), name);
 #else
   _name = NULL;
 #endif
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -831,7 +831,7 @@
 jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) {
   // For consistency allocate memory using os::malloc wrapper.
   _scratch_classes = (instanceKlassHandle *)
-    os::malloc(sizeof(instanceKlassHandle) * _class_count);
+    os::malloc(sizeof(instanceKlassHandle) * _class_count, mtInternal);
   if (_scratch_classes == NULL) {
     return JVMTI_ERROR_OUT_OF_MEMORY;
   }
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -55,7 +55,7 @@
 // and the tag value. In addition an entry includes a next pointer which
 // is used to chain entries together.
 
-class JvmtiTagHashmapEntry : public CHeapObj {
+class JvmtiTagHashmapEntry : public CHeapObj<mtInternal> {
  private:
   friend class JvmtiTagMap;
 
@@ -106,7 +106,7 @@
 // entries. It also provides a function to iterate over all entries
 // in the hashmap.
 
-class JvmtiTagHashmap : public CHeapObj {
+class JvmtiTagHashmap : public CHeapObj<mtInternal> {
  private:
   friend class JvmtiTagMap;
 
@@ -150,7 +150,7 @@
     _resize_threshold = (int)(_load_factor * _size);
     _resizing_enabled = true;
     size_t s = initial_size * sizeof(JvmtiTagHashmapEntry*);
-    _table = (JvmtiTagHashmapEntry**)os::malloc(s);
+    _table = (JvmtiTagHashmapEntry**)os::malloc(s, mtInternal);
     if (_table == NULL) {
       vm_exit_out_of_memory(s, "unable to allocate initial hashtable for jvmti object tags");
     }
@@ -188,7 +188,7 @@
 
     // allocate new table
     size_t s = new_size * sizeof(JvmtiTagHashmapEntry*);
-    JvmtiTagHashmapEntry** new_table = (JvmtiTagHashmapEntry**)os::malloc(s);
+    JvmtiTagHashmapEntry** new_table = (JvmtiTagHashmapEntry**)os::malloc(s, mtInternal);
     if (new_table == NULL) {
       warning("unable to allocate larger hashtable for jvmti object tags");
       set_resizing_enabled(false);
@@ -776,7 +776,7 @@
 // For each field it holds the field index (as defined by the JVMTI specification),
 // the field type, and the offset.
 
-class ClassFieldDescriptor: public CHeapObj {
+class ClassFieldDescriptor: public CHeapObj<mtInternal> {
  private:
   int _field_index;
   int _field_offset;
@@ -790,7 +790,7 @@
   int field_offset() const  { return _field_offset; }
 };
 
-class ClassFieldMap: public CHeapObj {
+class ClassFieldMap: public CHeapObj<mtInternal> {
  private:
   enum {
     initial_field_count = 5
@@ -821,7 +821,8 @@
 };
 
 ClassFieldMap::ClassFieldMap() {
-  _fields = new (ResourceObj::C_HEAP) GrowableArray<ClassFieldDescriptor*>(initial_field_count, true);
+  _fields = new (ResourceObj::C_HEAP, mtInternal)
+    GrowableArray<ClassFieldDescriptor*>(initial_field_count, true);
 }
 
 ClassFieldMap::~ClassFieldMap() {
@@ -892,7 +893,7 @@
 // heap iteration and avoid creating a field map for each object in the heap
 // (only need to create the map when the first instance of a class is encountered).
 //
-class JvmtiCachedClassFieldMap : public CHeapObj {
+class JvmtiCachedClassFieldMap : public CHeapObj<mtInternal> {
  private:
    enum {
      initial_class_count = 200
@@ -957,7 +958,8 @@
 // record that the given instanceKlass is caching a field map
 void JvmtiCachedClassFieldMap::add_to_class_list(instanceKlass* ik) {
   if (_class_list == NULL) {
-    _class_list = new (ResourceObj::C_HEAP) GrowableArray<instanceKlass*>(initial_class_count, true);
+    _class_list = new (ResourceObj::C_HEAP, mtInternal)
+      GrowableArray<instanceKlass*>(initial_class_count, true);
   }
   _class_list->push(ik);
 }
@@ -1526,8 +1528,8 @@
     _env = env;
     _tags = (jlong*)tags;
     _tag_count = tag_count;
-    _object_results = new (ResourceObj::C_HEAP) GrowableArray<jobject>(1,true);
-    _tag_results = new (ResourceObj::C_HEAP) GrowableArray<uint64_t>(1,true);
+    _object_results = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jobject>(1,true);
+    _tag_results = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<uint64_t>(1,true);
   }
 
   ~TagObjectCollector() {
@@ -1672,8 +1674,8 @@
   Universe::heap()->ensure_parsability(false);  // no need to retire TLABs
 
   // create stacks for interesting headers
-  _saved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(4000, true);
-  _saved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
+  _saved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(4000, true);
+  _saved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(4000, true);
 
   if (UseBiasedLocking) {
     BiasedLocking::preserve_marks();
@@ -2712,7 +2714,7 @@
   bool _reporting_string_values;
 
   GrowableArray<oop>* create_visit_stack() {
-    return new (ResourceObj::C_HEAP) GrowableArray<oop>(initial_visit_stack_size, true);
+    return new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(initial_visit_stack_size, true);
   }
 
   // accessors
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -41,7 +41,7 @@
 class JvmtiTagHashmapEntry;
 class JvmtiTagHashmapEntryClosure;
 
-class JvmtiTagMap :  public CHeapObj {
+class JvmtiTagMap :  public CHeapObj<mtInternal> {
  private:
 
   enum{
--- a/hotspot/src/share/vm/prims/jvmtiThreadState.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiThreadState.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -72,7 +72,7 @@
 //
 // The Jvmti state for each thread (across all JvmtiEnv):
 // 1. Local table of enabled events.
-class JvmtiThreadState : public CHeapObj {
+class JvmtiThreadState : public CHeapObj<mtInternal> {
  private:
   friend class JvmtiEnv;
   JavaThread        *_thread;
--- a/hotspot/src/share/vm/prims/jvmtiUtil.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiUtil.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -40,7 +40,7 @@
   if (_single_threaded_resource_area == NULL) {
     // lazily create the single threaded resource area
     // pick a size which is not a standard since the pools don't exist yet
-    _single_threaded_resource_area = new ResourceArea(Chunk::non_pool_size);
+    _single_threaded_resource_area = new (mtInternal) ResourceArea(Chunk::non_pool_size);
   }
   return _single_threaded_resource_area;
 }
--- a/hotspot/src/share/vm/prims/unsafe.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/prims/unsafe.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -596,7 +596,7 @@
     return 0;
   }
   sz = round_to(sz, HeapWordSize);
-  void* x = os::malloc(sz);
+  void* x = os::malloc(sz, mtInternal);
   if (x == NULL) {
     THROW_0(vmSymbols::java_lang_OutOfMemoryError());
   }
@@ -616,7 +616,7 @@
     return 0;
   }
   sz = round_to(sz, HeapWordSize);
-  void* x = (p == NULL) ? os::malloc(sz) : os::realloc(p, sz);
+  void* x = (p == NULL) ? os::malloc(sz, mtInternal) : os::realloc(p, sz, mtInternal);
   if (x == NULL) {
     THROW_0(vmSymbols::java_lang_OutOfMemoryError());
   }
@@ -877,7 +877,7 @@
         return 0;
     }
 
-    body = NEW_C_HEAP_ARRAY(jbyte, length);
+    body = NEW_C_HEAP_ARRAY(jbyte, length, mtInternal);
 
     if (body == 0) {
         throw_new(env, "OutOfMemoryError");
@@ -893,7 +893,7 @@
         uint len = env->GetStringUTFLength(name);
         int unicode_len = env->GetStringLength(name);
         if (len >= sizeof(buf)) {
-            utfName = NEW_C_HEAP_ARRAY(char, len + 1);
+            utfName = NEW_C_HEAP_ARRAY(char, len + 1, mtInternal);
             if (utfName == NULL) {
                 throw_new(env, "OutOfMemoryError");
                 goto free_body;
@@ -913,10 +913,10 @@
     result = JVM_DefineClass(env, utfName, loader, body, length, pd);
 
     if (utfName && utfName != buf)
-        FREE_C_HEAP_ARRAY(char, utfName);
+        FREE_C_HEAP_ARRAY(char, utfName, mtInternal);
 
  free_body:
-    FREE_C_HEAP_ARRAY(jbyte, body);
+    FREE_C_HEAP_ARRAY(jbyte, body, mtInternal);
     return result;
   }
 }
@@ -1011,7 +1011,7 @@
 
   jint length = typeArrayOop(JNIHandles::resolve_non_null(data))->length();
   jint word_length = (length + sizeof(HeapWord)-1) / sizeof(HeapWord);
-  HeapWord* body = NEW_C_HEAP_ARRAY(HeapWord, word_length);
+  HeapWord* body = NEW_C_HEAP_ARRAY(HeapWord, word_length, mtInternal);
   if (body == NULL) {
     THROW_0(vmSymbols::java_lang_OutOfMemoryError());
   }
@@ -1095,7 +1095,7 @@
 
   // try/finally clause:
   if (temp_alloc != NULL) {
-    FREE_C_HEAP_ARRAY(HeapWord, temp_alloc);
+    FREE_C_HEAP_ARRAY(HeapWord, temp_alloc, mtInternal);
   }
 
   return (jclass) res_jh;
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -35,6 +35,7 @@
 #include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
 #include "services/management.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/taskqueue.hpp"
 #ifdef TARGET_OS_FAMILY_linux
@@ -368,7 +369,7 @@
 inline void SysClassPath::reset_item_at(int index) {
   assert(index < _scp_nitems && index != _scp_base, "just checking");
   if (_items[index] != NULL) {
-    FREE_C_HEAP_ARRAY(char, _items[index]);
+    FREE_C_HEAP_ARRAY(char, _items[index], mtInternal);
     _items[index] = NULL;
   }
 }
@@ -400,11 +401,11 @@
       expanded_path = add_jars_to_path(expanded_path, path);
       path = end;
     } else {
-      char* dirpath = NEW_C_HEAP_ARRAY(char, tmp_end - path + 1);
+      char* dirpath = NEW_C_HEAP_ARRAY(char, tmp_end - path + 1, mtInternal);
       memcpy(dirpath, path, tmp_end - path);
       dirpath[tmp_end - path] = '\0';
       expanded_path = add_jars_to_path(expanded_path, dirpath);
-      FREE_C_HEAP_ARRAY(char, dirpath);
+      FREE_C_HEAP_ARRAY(char, dirpath, mtInternal);
       path = tmp_end + 1;
     }
   }
@@ -435,7 +436,7 @@
   assert(total_len > 0, "empty sysclasspath not allowed");
 
   // Copy the _items to a single string.
-  char* cp = NEW_C_HEAP_ARRAY(char, total_len);
+  char* cp = NEW_C_HEAP_ARRAY(char, total_len, mtInternal);
   char* cp_tmp = cp;
   for (i = 0; i < _scp_nitems; ++i) {
     if (_items[i] != NULL) {
@@ -456,7 +457,7 @@
   assert(str != NULL, "just checking");
   if (path == NULL) {
     size_t len = strlen(str) + 1;
-    cp = NEW_C_HEAP_ARRAY(char, len);
+    cp = NEW_C_HEAP_ARRAY(char, len, mtInternal);
     memcpy(cp, str, len);                       // copy the trailing null
   } else {
     const char separator = *os::path_separator();
@@ -465,15 +466,15 @@
     size_t len = old_len + str_len + 2;
 
     if (prepend) {
-      cp = NEW_C_HEAP_ARRAY(char, len);
+      cp = NEW_C_HEAP_ARRAY(char, len, mtInternal);
       char* cp_tmp = cp;
       memcpy(cp_tmp, str, str_len);
       cp_tmp += str_len;
       *cp_tmp = separator;
       memcpy(++cp_tmp, path, old_len + 1);      // copy the trailing null
-      FREE_C_HEAP_ARRAY(char, path);
+      FREE_C_HEAP_ARRAY(char, path, mtInternal);
     } else {
-      cp = REALLOC_C_HEAP_ARRAY(char, path, len);
+      cp = REALLOC_C_HEAP_ARRAY(char, path, len, mtInternal);
       char* cp_tmp = cp + old_len;
       *cp_tmp = separator;
       memcpy(++cp_tmp, str, str_len + 1);       // copy the trailing null
@@ -495,7 +496,7 @@
 
   /* Scan the directory for jars/zips, appending them to path. */
   struct dirent *entry;
-  char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory));
+  char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtInternal);
   while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
     const char* name = entry->d_name;
     const char* ext = name + strlen(name) - 4;
@@ -503,13 +504,13 @@
       (os::file_name_strcmp(ext, ".jar") == 0 ||
        os::file_name_strcmp(ext, ".zip") == 0);
     if (isJarOrZip) {
-      char* jarpath = NEW_C_HEAP_ARRAY(char, directory_len + 2 + strlen(name));
+      char* jarpath = NEW_C_HEAP_ARRAY(char, directory_len + 2 + strlen(name), mtInternal);
       sprintf(jarpath, "%s%s%s", directory, dir_sep, name);
       path = add_to_path(path, jarpath, false);
-      FREE_C_HEAP_ARRAY(char, jarpath);
+      FREE_C_HEAP_ARRAY(char, jarpath, mtInternal);
     }
   }
-  FREE_C_HEAP_ARRAY(char, dbuf);
+  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
   os::closedir(dir);
   return path;
 }
@@ -631,7 +632,7 @@
 static bool set_string_flag(char* name, const char* value, FlagValueOrigin origin) {
   if (!CommandLineFlags::ccstrAtPut(name, &value, origin))  return false;
   // Contract:  CommandLineFlags always returns a pointer that needs freeing.
-  FREE_C_HEAP_ARRAY(char, value);
+  FREE_C_HEAP_ARRAY(char, value, mtInternal);
   return true;
 }
 
@@ -647,7 +648,7 @@
   } else if (new_len == 0) {
     value = old_value;
   } else {
-    char* buf = NEW_C_HEAP_ARRAY(char, old_len + 1 + new_len + 1);
+    char* buf = NEW_C_HEAP_ARRAY(char, old_len + 1 + new_len + 1, mtInternal);
     // each new setting adds another LINE to the switch:
     sprintf(buf, "%s\n%s", old_value, new_value);
     value = buf;
@@ -655,10 +656,10 @@
   }
   (void) CommandLineFlags::ccstrAtPut(name, &value, origin);
   // CommandLineFlags always returns a pointer that needs freeing.
-  FREE_C_HEAP_ARRAY(char, value);
+  FREE_C_HEAP_ARRAY(char, value, mtInternal);
   if (free_this_too != NULL) {
     // CommandLineFlags made its own copy, so I must delete my own temp. buffer.
-    FREE_C_HEAP_ARRAY(char, free_this_too);
+    FREE_C_HEAP_ARRAY(char, free_this_too, mtInternal);
   }
   return true;
 }
@@ -735,9 +736,9 @@
   // expand the array and add arg to the last element
   (*count)++;
   if (*bldarray == NULL) {
-    *bldarray = NEW_C_HEAP_ARRAY(char*, *count);
+    *bldarray = NEW_C_HEAP_ARRAY(char*, *count, mtInternal);
   } else {
-    *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, *count);
+    *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, *count, mtInternal);
   }
   (*bldarray)[index] = strdup(arg);
 }
@@ -917,13 +918,13 @@
   char* value = (char *)ns;
 
   size_t key_len = (eq == NULL) ? strlen(prop) : (eq - prop);
-  key = AllocateHeap(key_len + 1, "add_property");
+  key = AllocateHeap(key_len + 1, mtInternal);
   strncpy(key, prop, key_len);
   key[key_len] = '\0';
 
   if (eq != NULL) {
     size_t value_len = strlen(prop) - key_len - 1;
-    value = AllocateHeap(value_len + 1, "add_property");
+    value = AllocateHeap(value_len + 1, mtInternal);
     strncpy(value, &prop[key_len + 1], value_len + 1);
   }
 
@@ -2058,12 +2059,12 @@
     const char* altclasses_jar = "alt-rt.jar";
     size_t altclasses_path_len = strlen(get_meta_index_dir()) + 1 +
                                  strlen(altclasses_jar);
-    char* altclasses_path = NEW_C_HEAP_ARRAY(char, altclasses_path_len);
+    char* altclasses_path = NEW_C_HEAP_ARRAY(char, altclasses_path_len, mtInternal);
     strcpy(altclasses_path, get_meta_index_dir());
     strcat(altclasses_path, altclasses_jar);
     scp.add_suffix_to_prefix(altclasses_path);
     scp_assembly_required = true;
-    FREE_C_HEAP_ARRAY(char, altclasses_path);
+    FREE_C_HEAP_ARRAY(char, altclasses_path, mtInternal);
   }
 
   if (WhiteBoxAPI) {
@@ -2071,12 +2072,12 @@
     const char* wb_jar = "wb.jar";
     size_t wb_path_len = strlen(get_meta_index_dir()) + 1 +
                          strlen(wb_jar);
-    char* wb_path = NEW_C_HEAP_ARRAY(char, wb_path_len);
+    char* wb_path = NEW_C_HEAP_ARRAY(char, wb_path_len, mtInternal);
     strcpy(wb_path, get_meta_index_dir());
     strcat(wb_path, wb_jar);
     scp.add_suffix(wb_path);
     scp_assembly_required = true;
-    FREE_C_HEAP_ARRAY(char, wb_path);
+    FREE_C_HEAP_ARRAY(char, wb_path, mtInternal);
   }
 
   // Parse _JAVA_OPTIONS environment variable (if present) (mimics classic VM)
@@ -2161,13 +2162,13 @@
       if (tail != NULL) {
         const char* pos = strchr(tail, ':');
         size_t len = (pos == NULL) ? strlen(tail) : pos - tail;
-        char* name = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len + 1), tail, len);
+        char* name = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len + 1, mtInternal), tail, len);
         name[len] = '\0';
 
         char *options = NULL;
         if(pos != NULL) {
           size_t len2 = strlen(pos+1) + 1; // options start after ':'.  Final zero must be copied.
-          options = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len2), pos+1, len2);
+          options = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len2, mtInternal), pos+1, len2);
         }
 #ifdef JVMTI_KERNEL
         if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
@@ -2182,12 +2183,12 @@
       if(tail != NULL) {
         const char* pos = strchr(tail, '=');
         size_t len = (pos == NULL) ? strlen(tail) : pos - tail;
-        char* name = strncpy(NEW_C_HEAP_ARRAY(char, len + 1), tail, len);
+        char* name = strncpy(NEW_C_HEAP_ARRAY(char, len + 1, mtInternal), tail, len);
         name[len] = '\0';
 
         char *options = NULL;
         if(pos != NULL) {
-          options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(pos + 1) + 1), pos + 1);
+          options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(pos + 1) + 1, mtInternal), pos + 1);
         }
 #ifdef JVMTI_KERNEL
         if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
@@ -2200,7 +2201,7 @@
     // -javaagent
     } else if (match_option(option, "-javaagent:", &tail)) {
       if(tail != NULL) {
-        char *options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(tail) + 1), tail);
+        char *options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(tail) + 1, mtInternal), tail);
         add_init_agent("instrument", options, false);
       }
     // -Xnoclassgc
@@ -2958,7 +2959,7 @@
   char *end = strrchr(jvm_path, *os::file_separator());
   if (end != NULL) *end = '\0';
   char *shared_archive_path = NEW_C_HEAP_ARRAY(char, strlen(jvm_path) +
-                                        strlen(os::file_separator()) + 20);
+      strlen(os::file_separator()) + 20, mtInternal);
   if (shared_archive_path == NULL) return JNI_ENOMEM;
   strcpy(shared_archive_path, jvm_path);
   strcat(shared_archive_path, os::file_separator());
@@ -2996,6 +2997,10 @@
       CommandLineFlags::printFlags(tty, false);
       vm_exit(0);
     }
+    if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
+      MemTracker::init_tracking_options(tail);
+    }
+
 
 #ifndef PRODUCT
     if (match_option(option, "-XX:+PrintFlagsWithComments", &tail)) {
@@ -3331,7 +3336,7 @@
     }
   }
   // Add one for null terminator.
-  char *props = AllocateHeap(length + 1, "get_kernel_properties");
+  char *props = AllocateHeap(length + 1, mtInternal);
   if (length != 0) {
     int pos = 0;
     for (prop = _system_properties; prop != NULL; prop = prop->next()) {
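
The new -XX:NativeMemoryTracking option is recognized in the hunk above and
its tail (everything from the '=' on) is handed to
MemTracker::init_tracking_options. The parser itself is not part of this
file; the following is only a hedged sketch of what it is expected to accept,
based on the flag's "off" default added in globals.hpp below (the accepted
values and the structure are assumptions; the real implementation lives in
services/memTracker.*):

    // Sketch only: map the option tail onto a tracking level.
    void init_tracking_options_sketch(const char* tail) {
      // tail still begins with '=' because match_option stops at the prefix
      if (strcmp(tail, "=summary") == 0) {
        // record running totals per memory type (mtClass, mtThread, ...)
      } else if (strcmp(tail, "=detail") == 0) {
        // additionally record call sites for individual allocations
      } else {
        // anything else leaves tracking at its default, "off"
      }
    }
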
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -44,7 +44,7 @@
 
 // Element describing System and User (-Dkey=value flags) defined property.
 
-class SystemProperty: public CHeapObj {
+class SystemProperty: public CHeapObj<mtInternal> {
  private:
   char*           _key;
   char*           _value;
@@ -63,7 +63,7 @@
       if (_value != NULL) {
         FreeHeap(_value);
       }
-      _value = AllocateHeap(strlen(value)+1);
+      _value = AllocateHeap(strlen(value)+1, mtInternal);
       if (_value != NULL) {
         strcpy(_value, value);
       }
@@ -80,7 +80,7 @@
       if (_value != NULL) {
         len += strlen(_value);
       }
-      sp = AllocateHeap(len+2);
+      sp = AllocateHeap(len+2, mtInternal);
       if (sp != NULL) {
         if (_value != NULL) {
           strcpy(sp, _value);
@@ -100,13 +100,13 @@
     if (key == NULL) {
       _key = NULL;
     } else {
-      _key = AllocateHeap(strlen(key)+1);
+      _key = AllocateHeap(strlen(key)+1, mtInternal);
       strcpy(_key, key);
     }
     if (value == NULL) {
       _value = NULL;
     } else {
-      _value = AllocateHeap(strlen(value)+1);
+      _value = AllocateHeap(strlen(value)+1, mtInternal);
       strcpy(_value, value);
     }
     _next = NULL;
@@ -116,7 +116,7 @@
 
 
 // For use by -agentlib, -agentpath and -Xrun
-class AgentLibrary : public CHeapObj {
+class AgentLibrary : public CHeapObj<mtInternal> {
   friend class AgentLibraryList;
  private:
   char*           _name;
@@ -136,12 +136,12 @@
 
   // Constructor
   AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib) {
-    _name = AllocateHeap(strlen(name)+1);
+    _name = AllocateHeap(strlen(name)+1, mtInternal);
     strcpy(_name, name);
     if (options == NULL) {
       _options = NULL;
     } else {
-      _options = AllocateHeap(strlen(options)+1);
+      _options = AllocateHeap(strlen(options)+1, mtInternal);
       strcpy(_options, options);
     }
     _is_absolute_path = is_absolute_path;
--- a/hotspot/src/share/vm/runtime/biasedLocking.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/biasedLocking.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -687,8 +687,8 @@
   // monitors in a prepass and, if they are biased, preserve their
   // mark words here. This should be a relatively small set of objects
   // especially compared to the number of objects in the heap.
-  _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(10, true);
-  _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<Handle>(10, true);
+  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
+  _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
 
   ResourceMark rm;
   Thread* cur = Thread::current();
--- a/hotspot/src/share/vm/runtime/compilationPolicy.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -37,7 +37,7 @@
 class CompileTask;
 class CompileQueue;
 
-class CompilationPolicy : public CHeapObj {
+class CompilationPolicy : public CHeapObj<mtCompiler> {
   static CompilationPolicy* _policy;
   // Accumulated time
   static elapsedTimer       _accumulated_time;
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -101,7 +101,7 @@
   _number_of_frames          = number_of_frames;
   _frame_sizes               = frame_sizes;
   _frame_pcs                 = frame_pcs;
-  _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2);
+  _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
   _return_type               = return_type;
   _initial_info              = 0;
   // PD (x86 only)
@@ -114,9 +114,9 @@
 
 
 Deoptimization::UnrollBlock::~UnrollBlock() {
-  FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
-  FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
-  FREE_C_HEAP_ARRAY(intptr_t, _register_block);
+  FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes, mtCompiler);
+  FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs, mtCompiler);
+  FREE_C_HEAP_ARRAY(intptr_t, _register_block, mtCompiler);
 }
 
 
@@ -358,9 +358,9 @@
 
   // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
   // virtual activation, which is the reverse of the elements in the vframes array.
-  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames);
+  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
   // +1 because we always have an interpreter return address for the final slot.
-  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1);
+  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
   int popframe_extra_args = 0;
   // Create an interpreter return address for the stub to use as its return
   // address so the skeletal frames are perfectly walkable
--- a/hotspot/src/share/vm/runtime/deoptimization.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/deoptimization.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -129,7 +129,7 @@
 
   // UnrollBlock is returned by fetch_unroll_info() to the deoptimization handler (blob).
   // This is only a CHeapObj to ease debugging after a deopt failure
-  class UnrollBlock : public CHeapObj {
+  class UnrollBlock : public CHeapObj<mtCompiler> {
    private:
     int       _size_of_deoptimized_frame; // Size, in bytes, of current deoptimized frame
     int       _caller_adjustment;         // Adjustment, in bytes, to caller's SP by initial interpreted frame
--- a/hotspot/src/share/vm/runtime/dtraceJSDT.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/dtraceJSDT.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -63,7 +63,7 @@
   static jboolean is_supported();
 };
 
-class RegisteredProbes : public CHeapObj {
+class RegisteredProbes : public CHeapObj<mtInternal> {
  private:
   nmethod** _nmethods;      // all the probe methods
   size_t    _count;         // number of probe methods
@@ -72,7 +72,7 @@
  public:
   RegisteredProbes(size_t count) {
     _count = count;
-    _nmethods = NEW_C_HEAP_ARRAY(nmethod*, count);
+    _nmethods = NEW_C_HEAP_ARRAY(nmethod*, count, mtInternal);
   }
 
   ~RegisteredProbes() {
@@ -81,7 +81,7 @@
       _nmethods[i]->make_not_entrant();
       _nmethods[i]->method()->clear_code();
     }
-    FREE_C_HEAP_ARRAY(nmethod*, _nmethods);
+    FREE_C_HEAP_ARRAY(nmethod*, _nmethods, mtInternal);
     _nmethods = NULL;
     _count = 0;
   }
--- a/hotspot/src/share/vm/runtime/fprofiler.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/fprofiler.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -70,12 +70,12 @@
 ThreadProfiler::ThreadProfiler() {
   // Space for the ProfilerNodes
   const int area_size = 1 * ProfilerNodeSize * 1024;
-  area_bottom = AllocateHeap(area_size, "fprofiler");
+  area_bottom = AllocateHeap(area_size, mtInternal);
   area_top    = area_bottom;
   area_limit  = area_bottom + area_size;
 
   // ProfilerNode pointer table
-  table = NEW_C_HEAP_ARRAY(ProfilerNode*, table_size);
+  table = NEW_C_HEAP_ARRAY(ProfilerNode*, table_size, mtInternal);
   initialize();
   engaged = false;
 }
@@ -157,7 +157,7 @@
 void PCRecorder::init() {
   MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   int s = size();
-  counters = NEW_C_HEAP_ARRAY(int, s);
+  counters = NEW_C_HEAP_ARRAY(int, s, mtInternal);
   for (int index = 0; index < s; index++) {
     counters[index] = 0;
   }
@@ -850,7 +850,7 @@
   if (Threads_lock->try_lock()) {
     {  // Threads_lock scope
       maxthreads = Threads::number_of_threads();
-      threadsList = NEW_C_HEAP_ARRAY(JavaThread *, maxthreads);
+      threadsList = NEW_C_HEAP_ARRAY(JavaThread *, maxthreads, mtInternal);
       suspendedthreadcount = 0;
       for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
         if (tp->is_Compiler_thread()) {
@@ -1195,8 +1195,8 @@
 
 void FlatProfiler::allocate_table() {
   { // Bytecode table
-    bytecode_ticks      = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes);
-    bytecode_ticks_stub = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes);
+    bytecode_ticks      = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal);
+    bytecode_ticks_stub = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal);
     for(int index = 0; index < Bytecodes::number_of_codes; index++) {
       bytecode_ticks[index]      = 0;
       bytecode_ticks_stub[index] = 0;
@@ -1205,7 +1205,7 @@
 
   if (ProfilerRecordPC) PCRecorder::init();
 
-  interval_data         = NEW_C_HEAP_ARRAY(IntervalData, interval_print_size);
+  interval_data         = NEW_C_HEAP_ARRAY(IntervalData, interval_print_size, mtInternal);
   FlatProfiler::interval_reset();
 }
 
--- a/hotspot/src/share/vm/runtime/fprofiler.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/fprofiler.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -121,7 +121,7 @@
 };
 #endif // FPROF_KERNEL
 
-class ThreadProfiler: public CHeapObj {
+class ThreadProfiler: public CHeapObj<mtInternal> {
 public:
   ThreadProfiler()    KERNEL_RETURN;
   ~ThreadProfiler()   KERNEL_RETURN;
--- a/hotspot/src/share/vm/runtime/globals.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -465,13 +465,13 @@
   ccstr old_value = result->get_ccstr();
   char* new_value = NULL;
   if (*value != NULL) {
-    new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1);
+    new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1, mtInternal);
     strcpy(new_value, *value);
   }
   result->set_ccstr(new_value);
   if (result->origin == DEFAULT && old_value != NULL) {
     // Prior value is NOT heap allocated, but was a literal constant.
-    char* old_value_to_free = NEW_C_HEAP_ARRAY(char, strlen(old_value)+1);
+    char* old_value_to_free = NEW_C_HEAP_ARRAY(char, strlen(old_value)+1, mtInternal);
     strcpy(old_value_to_free, old_value);
     old_value = old_value_to_free;
   }
@@ -485,12 +485,12 @@
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type");
   ccstr old_value = faddr->get_ccstr();
-  char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1);
+  char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1, mtInternal);
   strcpy(new_value, value);
   faddr->set_ccstr(new_value);
   if (faddr->origin != DEFAULT && old_value != NULL) {
     // Prior value is heap allocated so free it.
-    FREE_C_HEAP_ARRAY(char, old_value);
+    FREE_C_HEAP_ARRAY(char, old_value, mtInternal);
   }
   faddr->origin = origin;
 }
@@ -511,7 +511,7 @@
   while (flagTable[length].name != NULL) length++;
 
   // Sort
-  Flag** array = NEW_C_HEAP_ARRAY(Flag*, length);
+  Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtInternal);
   for (int index = 0; index < length; index++) {
     array[index] = &flagTable[index];
   }
@@ -525,7 +525,7 @@
     }
   }
   out->cr();
-  FREE_C_HEAP_ARRAY(Flag*, array);
+  FREE_C_HEAP_ARRAY(Flag*, array, mtInternal);
 }
 
 #ifndef PRODUCT
@@ -547,7 +547,7 @@
   while (flagTable[length].name != NULL) length++;
 
   // Sort
-  Flag** array = NEW_C_HEAP_ARRAY(Flag*, length);
+  Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtInternal);
   for (int index = 0; index < length; index++) {
     array[index] = &flagTable[index];
   }
@@ -560,5 +560,5 @@
       array[i]->print_on(out, withComments);
     }
   }
-  FREE_C_HEAP_ARRAY(Flag*, array);
+  FREE_C_HEAP_ARRAY(Flag*, array, mtInternal);
 }
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -190,7 +190,6 @@
 
 #endif // no compilers
 
-
 // string type aliases used only in this file
 typedef const char* ccstr;
 typedef const char* ccstrlist;   // represents string arguments which accumulate
@@ -896,6 +895,9 @@
   develop(bool, UseFakeTimers, false,                                       \
           "Tells whether the VM should use system time or a fake timer")    \
                                                                             \
+  product(ccstr, NativeMemoryTracking, "off",                               \
+          "Native memory tracking options")                                 \
+                                                                            \
   diagnostic(bool, LogCompilation, false,                                   \
           "Log compilation activity in detail to hotspot.log or LogFile")   \
                                                                             \
--- a/hotspot/src/share/vm/runtime/handles.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/handles.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -111,7 +111,7 @@
   _chunk = _area->_chunk;
   _hwm   = _area->_hwm;
   _max   = _area->_max;
-  NOT_PRODUCT(_size_in_bytes = _area->_size_in_bytes;)
+  _size_in_bytes = _area->_size_in_bytes;
   debug_only(_area->_handle_mark_nesting++);
   assert(_area->_handle_mark_nesting > 0, "must stack allocate HandleMarks");
   debug_only(Atomic::inc(&_nof_handlemarks);)
@@ -159,7 +159,7 @@
   area->_chunk = _chunk;
   area->_hwm = _hwm;
   area->_max = _max;
-  NOT_PRODUCT(area->set_size_in_bytes(_size_in_bytes);)
+  area->set_size_in_bytes(_size_in_bytes);
 #ifdef ASSERT
   // clear out first chunk (to detect allocation bugs)
   if (ZapVMHandleArea) {
--- a/hotspot/src/share/vm/runtime/handles.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/handles.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -238,7 +238,6 @@
 
 //------------------------------------------------------------------------------------------------------------------------
 // Thread local handle area
-
 class HandleArea: public Arena {
   friend class HandleMark;
   friend class NoHandleMark;
@@ -312,7 +311,7 @@
   HandleArea *_area;            // saved handle area
   Chunk *_chunk;                // saved arena chunk
   char *_hwm, *_max;            // saved arena info
-  NOT_PRODUCT(size_t _size_in_bytes;) // size of handle area
+  size_t _size_in_bytes;        // size of handle area
   // Link to previous active HandleMark in thread
   HandleMark* _previous_handle_mark;
 
--- a/hotspot/src/share/vm/runtime/handles.inline.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/handles.inline.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -85,7 +85,7 @@
   area->_chunk = _chunk;
   area->_hwm = _hwm;
   area->_max = _max;
-  NOT_PRODUCT(area->set_size_in_bytes(_size_in_bytes);)
+  area->set_size_in_bytes(_size_in_bytes);
   debug_only(area->_handle_mark_nesting--);
 }
 
--- a/hotspot/src/share/vm/runtime/java.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/java.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -384,7 +384,7 @@
     typedef void (*__exit_proc)(void);
 }
 
-class ExitProc : public CHeapObj {
+class ExitProc : public CHeapObj<mtInternal> {
  private:
   __exit_proc _proc;
   // void (*_proc)(void);
--- a/hotspot/src/share/vm/runtime/jniHandles.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/jniHandles.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -109,7 +109,7 @@
 
 // JNI handle blocks holding local/global JNI handles
 
-class JNIHandleBlock : public CHeapObj {
+class JNIHandleBlock : public CHeapObj<mtInternal> {
   friend class VMStructs;
   friend class CppInterpreter;
 
--- a/hotspot/src/share/vm/runtime/monitorChunk.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/monitorChunk.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -29,7 +29,7 @@
 
 MonitorChunk::MonitorChunk(int number_on_monitors) {
   _number_of_monitors = number_on_monitors;
-  _monitors           = NEW_C_HEAP_ARRAY(BasicObjectLock, number_on_monitors);
+  _monitors           = NEW_C_HEAP_ARRAY(BasicObjectLock, number_on_monitors, mtInternal);
   _next               = NULL;
 }
 
--- a/hotspot/src/share/vm/runtime/monitorChunk.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/monitorChunk.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -30,7 +30,7 @@
 // Data structure for holding monitors for one activation during
 // deoptimization.
 
-class MonitorChunk: public CHeapObj {
+class MonitorChunk: public CHeapObj<mtInternal> {
  private:
   int              _number_of_monitors;
   BasicObjectLock* _monitors;
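
NEW_C_HEAP_ARRAY and FREE_C_HEAP_ARRAY now take a memory type as their last argument, and the type should match between allocation and free. A hedged usage sketch (mtInternal is an assumption):

  // Illustrative pairing of the updated macros.
  int* counters = NEW_C_HEAP_ARRAY(int, 16, mtInternal);
  if (counters != NULL) {
    memset(counters, 0, 16 * sizeof(int));
    FREE_C_HEAP_ARRAY(int, counters, mtInternal);
  }
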
--- a/hotspot/src/share/vm/runtime/mutex.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/mutex.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -84,7 +84,7 @@
 // The default length of monitor name is chosen to be 64 to avoid false sharing.
 static const int MONITOR_NAME_LEN = 64;
 
-class Monitor : public CHeapObj {
+class Monitor : public CHeapObj<mtInternal> {
 
  public:
   // A special lock: Is a lock where you are guaranteed not to block while you are
--- a/hotspot/src/share/vm/runtime/os.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/os.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -45,6 +45,7 @@
 #include "runtime/os.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "services/attachListener.hpp"
+#include "services/memTracker.hpp"
 #include "services/threadService.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
@@ -433,9 +434,9 @@
 
 // --------------------- heap allocation utilities ---------------------
 
-char *os::strdup(const char *str) {
+char *os::strdup(const char *str, MEMFLAGS flags) {
   size_t size = strlen(str);
-  char *dup_str = (char *)malloc(size + 1);
+  char *dup_str = (char *)malloc(size + 1, flags);
   if (dup_str == NULL) return NULL;
   strcpy(dup_str, str);
   return dup_str;
@@ -559,7 +560,7 @@
 }
 #endif
 
-void* os::malloc(size_t size) {
+void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
   NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
 
@@ -571,6 +572,7 @@
 
   NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
   u_char* ptr = (u_char*)::malloc(size + space_before + space_after);
+
 #ifdef ASSERT
   if (ptr == NULL) return NULL;
   if (MallocCushion) {
@@ -589,18 +591,29 @@
   }
   debug_only(if (paranoid) verify_block(memblock));
   if (PrintMalloc && tty != NULL) tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);
+
+  // we do not track MallocCushion memory
+  if (MemTracker::is_on()) {
+    MemTracker::record_malloc((address)memblock, size, memflags, caller == 0 ? CALLER_PC : caller);
+  }
+
   return memblock;
 }
 
 
-void* os::realloc(void *memblock, size_t size) {
+void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller) {
 #ifndef ASSERT
   NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
-  return ::realloc(memblock, size);
+  void* ptr = ::realloc(memblock, size);
+  if (ptr != NULL && MemTracker::is_on()) {
+    MemTracker::record_realloc((address)memblock, (address)ptr, size, memflags,
+     caller == 0 ? CALLER_PC : caller);
+  }
+  return ptr;
 #else
   if (memblock == NULL) {
-    return malloc(size);
+    return malloc(size, memflags, (caller == 0 ? CALLER_PC : caller));
   }
   if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
     tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
@@ -610,7 +623,7 @@
   NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
   if (size == 0) return NULL;
   // always move the block
-  void* ptr = malloc(size);
+  void* ptr = malloc(size, memflags, caller == 0 ? CALLER_PC : caller);
   if (PrintMalloc) tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
   // Copy to new memory if malloc didn't fail
   if ( ptr != NULL ) {
@@ -627,7 +640,7 @@
 }
 
 
-void  os::free(void *memblock) {
+void  os::free(void *memblock, MEMFLAGS memflags) {
   NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
 #ifdef ASSERT
   if (memblock == NULL) return;
@@ -660,6 +673,8 @@
     fprintf(stderr, "os::free " PTR_FORMAT "\n", (uintptr_t)memblock);
   }
 #endif
+  MemTracker::record_free((address)memblock, memflags);
+
   ::free((char*)memblock - space_before);
 }
 
@@ -1048,7 +1063,7 @@
         ++formatted_path_len;
     }
 
-    char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1);
+    char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1, mtInternal);
     if (formatted_path == NULL) {
         return NULL;
     }
@@ -1127,7 +1142,7 @@
     return NULL;
   }
   const char psepchar = *os::path_separator();
-  char* inpath = (char*)NEW_C_HEAP_ARRAY(char, strlen(path) + 1);
+  char* inpath = (char*)NEW_C_HEAP_ARRAY(char, strlen(path) + 1, mtInternal);
   if (inpath == NULL) {
     return NULL;
   }
@@ -1140,7 +1155,7 @@
     p++;
     p = strchr(p, psepchar);
   }
-  char** opath = (char**) NEW_C_HEAP_ARRAY(char*, count);
+  char** opath = (char**) NEW_C_HEAP_ARRAY(char*, count, mtInternal);
   if (opath == NULL) {
     return NULL;
   }
@@ -1153,7 +1168,7 @@
       return NULL;
     }
     // allocate the string and add terminator storage
-    char* s  = (char*)NEW_C_HEAP_ARRAY(char, len + 1);
+    char* s  = (char*)NEW_C_HEAP_ARRAY(char, len + 1, mtInternal);
     if (s == NULL) {
       return NULL;
     }
@@ -1162,7 +1177,7 @@
     opath[i] = s;
     p += len + 1;
   }
-  FREE_C_HEAP_ARRAY(char, inpath);
+  FREE_C_HEAP_ARRAY(char, inpath, mtInternal);
   *n = count;
   return opath;
 }
@@ -1366,3 +1381,97 @@
 
   return (int) i;
 }
+
+bool os::create_stack_guard_pages(char* addr, size_t bytes) {
+  return os::pd_create_stack_guard_pages(addr, bytes);
+}
+
+
+char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
+  char* result = pd_reserve_memory(bytes, addr, alignment_hint);
+  if (result != NULL && MemTracker::is_on()) {
+    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+  }
+
+  return result;
+}
+char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
+  char* result = pd_attempt_reserve_memory_at(bytes, addr);
+  if (result != NULL && MemTracker::is_on()) {
+    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+  }
+  return result;
+}
+
+void os::split_reserved_memory(char *base, size_t size,
+                                 size_t split, bool realloc) {
+  pd_split_reserved_memory(base, size, split, realloc);
+}
+
+bool os::commit_memory(char* addr, size_t bytes, bool executable) {
+  bool res = pd_commit_memory(addr, bytes, executable);
+  if (res && MemTracker::is_on()) {
+    MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
+  }
+  return res;
+}
+
+bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
+                              bool executable) {
+  bool res = os::pd_commit_memory(addr, size, alignment_hint, executable);
+  if (res && MemTracker::is_on()) {
+    MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
+  }
+  return res;
+}
+
+bool os::uncommit_memory(char* addr, size_t bytes) {
+  bool res = pd_uncommit_memory(addr, bytes);
+  if (res) {
+    MemTracker::record_virtual_memory_uncommit((address)addr, bytes);
+  }
+  return res;
+}
+
+bool os::release_memory(char* addr, size_t bytes) {
+  bool res = pd_release_memory(addr, bytes);
+  if (res) {
+    MemTracker::record_virtual_memory_release((address)addr, bytes);
+  }
+  return res;
+}
+
+
+char* os::map_memory(int fd, const char* file_name, size_t file_offset,
+                           char *addr, size_t bytes, bool read_only,
+                           bool allow_exec) {
+  char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
+  if (result != NULL && MemTracker::is_on()) {
+    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+  }
+  return result;
+}
+
+char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
+                             char *addr, size_t bytes, bool read_only,
+                             bool allow_exec) {
+  return pd_remap_memory(fd, file_name, file_offset, addr, bytes,
+                    read_only, allow_exec);
+}
+
+bool os::unmap_memory(char *addr, size_t bytes) {
+  bool result = pd_unmap_memory(addr, bytes);
+  if (result) {
+    MemTracker::record_virtual_memory_release((address)addr, bytes);
+  }
+  return result;
+}
+
+void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
+  pd_free_memory(addr, bytes, alignment_hint);
+}
+
+void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
+  pd_realign_memory(addr, bytes, alignment_hint);
+}
+
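
With the wrappers above, callers of the public os:: virtual-memory API get NMT bookkeeping automatically; only the platform-dependent pd_ variants bypass it. A hedged caller sketch (the size and executable flag are illustrative):

  // Illustrative only: reserve/commit/uncommit/release are each recorded by
  // MemTracker inside the os:: wrappers when tracking is on.
  size_t sz = 1024 * 1024;
  char* base = os::reserve_memory(sz, NULL, 0);
  if (base != NULL && os::commit_memory(base, sz, false)) {
    // ... use the committed range ...
    os::uncommit_memory(base, sz);
  }
  if (base != NULL) {
    os::release_memory(base, sz);
  }
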
--- a/hotspot/src/share/vm/runtime/os.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/os.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -99,6 +99,28 @@
     _page_sizes[1] = 0; // sentinel
   }
 
+  static char*  pd_reserve_memory(size_t bytes, char* addr = 0,
+                               size_t alignment_hint = 0);
+  static char*  pd_attempt_reserve_memory_at(size_t bytes, char* addr);
+  static void   pd_split_reserved_memory(char *base, size_t size,
+                                      size_t split, bool realloc);
+  static bool   pd_commit_memory(char* addr, size_t bytes, bool executable = false);
+  static bool   pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
+                              bool executable = false);
+  static bool   pd_uncommit_memory(char* addr, size_t bytes);
+  static bool   pd_release_memory(char* addr, size_t bytes);
+
+  static char*  pd_map_memory(int fd, const char* file_name, size_t file_offset,
+                           char *addr, size_t bytes, bool read_only = false,
+                           bool allow_exec = false);
+  static char*  pd_remap_memory(int fd, const char* file_name, size_t file_offset,
+                             char *addr, size_t bytes, bool read_only,
+                             bool allow_exec);
+  static bool   pd_unmap_memory(char *addr, size_t bytes);
+  static void   pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
+  static void   pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);
+
+
  public:
   static void init(void);                      // Called before command line parsing
   static jint init_2(void);                    // Called after command line parsing
@@ -236,8 +258,7 @@
   static char*  attempt_reserve_memory_at(size_t bytes, char* addr);
   static void   split_reserved_memory(char *base, size_t size,
                                       size_t split, bool realloc);
-  static bool   commit_memory(char* addr, size_t bytes,
-                              bool executable = false);
+  static bool   commit_memory(char* addr, size_t bytes, bool executable = false);
   static bool   commit_memory(char* addr, size_t size, size_t alignment_hint,
                               bool executable = false);
   static bool   uncommit_memory(char* addr, size_t bytes);
@@ -250,6 +271,7 @@
   static bool   guard_memory(char* addr, size_t bytes);
   static bool   unguard_memory(char* addr, size_t bytes);
   static bool   create_stack_guard_pages(char* addr, size_t bytes);
+  static bool   pd_create_stack_guard_pages(char* addr, size_t bytes);
   static bool   remove_stack_guard_pages(char* addr, size_t bytes);
 
   static char*  map_memory(int fd, const char* file_name, size_t file_offset,
@@ -573,12 +595,15 @@
   static void* thread_local_storage_at(int index);
   static void  free_thread_local_storage(int index);
 
+  // Stack walk
+  static address get_caller_pc(int n = 0);
+
   // General allocation (must be MT-safe)
-  static void* malloc  (size_t size);
-  static void* realloc (void *memblock, size_t size);
-  static void  free    (void *memblock);
+  static void* malloc  (size_t size, MEMFLAGS flags, address caller_pc = 0);
+  static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0);
+  static void  free    (void *memblock, MEMFLAGS flags = mtNone);
   static bool  check_heap(bool force = false);      // verify C heap integrity
-  static char* strdup(const char *);  // Like strdup
+  static char* strdup(const char *, MEMFLAGS flags = mtInternal);  // Like strdup
 
 #ifndef PRODUCT
   static julong num_mallocs;         // # of calls to malloc/realloc
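
A hedged sketch of a caller using the updated general-allocation entry points; mtInternal is an assumption, and the caller pc is left to default so os::malloc resolves it to CALLER_PC:

  // Illustrative only.
  char* buf = (char*)os::malloc(256, mtInternal);
  if (buf != NULL) {
    buf[0] = '\0';
    os::free(buf, mtInternal);
  }
  char* copy = os::strdup("example");   // strdup defaults to mtInternal
  if (copy != NULL) {
    os::free(copy, mtInternal);
  }
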
--- a/hotspot/src/share/vm/runtime/osThread.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/osThread.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -58,7 +58,7 @@
 // the main thread into its own Thread at will.
 
 
-class OSThread: public CHeapObj {
+class OSThread: public CHeapObj<mtThread> {
   friend class VMStructs;
  private:
   OSThreadStartFunc _start_proc;  // Thread start routine
--- a/hotspot/src/share/vm/runtime/park.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/park.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -141,7 +141,7 @@
 // although Niagara's hash function should help.
 
 void * ParkEvent::operator new (size_t sz) {
-  return (void *) ((intptr_t (CHeapObj::operator new (sz + 256)) + 256) & -256) ;
+  return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
 }
 
 void ParkEvent::operator delete (void * a) {
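
As in the ParkEvent change above, the low-level AllocateHeap/FreeHeap helpers now take a memory type (and optionally a caller pc) instead of an allocation label. A hedged sketch (mtInternal is an assumption):

  // Illustrative only: allocate and free raw C-heap memory under a memory type.
  char* raw = AllocateHeap(128, mtInternal, CALLER_PC);
  if (raw != NULL) {
    FreeHeap(raw, mtInternal);
  }
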
--- a/hotspot/src/share/vm/runtime/perfData.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/perfData.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -81,7 +81,7 @@
 
   const char* prefix = PerfDataManager::ns_to_string(ns);
 
-  _name = NEW_C_HEAP_ARRAY(char, strlen(name) + strlen(prefix) + 2);
+  _name = NEW_C_HEAP_ARRAY(char, strlen(name) + strlen(prefix) + 2, mtInternal);
   assert(_name != NULL && strlen(name) != 0, "invalid name");
 
   if (ns == NULL_NS) {
@@ -111,10 +111,10 @@
 
 PerfData::~PerfData() {
   if (_name != NULL) {
-    FREE_C_HEAP_ARRAY(char, _name);
+    FREE_C_HEAP_ARRAY(char, _name, mtInternal);
   }
   if (is_on_c_heap()) {
-    FREE_C_HEAP_ARRAY(PerfDataEntry, _pdep);
+    FREE_C_HEAP_ARRAY(PerfDataEntry, _pdep, mtInternal);
   }
 }
 
@@ -137,7 +137,7 @@
   if (psmp == NULL) {
     // out of PerfMemory memory resources. allocate on the C heap
     // to avoid vm termination.
-    psmp = NEW_C_HEAP_ARRAY(char, size);
+    psmp = NEW_C_HEAP_ARRAY(char, size, mtInternal);
     _on_c_heap = true;
   }
 
@@ -559,12 +559,12 @@
 
 PerfDataList::PerfDataList(int length) {
 
-  _set = new(ResourceObj::C_HEAP) PerfDataArray(length, true);
+  _set = new(ResourceObj::C_HEAP, mtInternal) PerfDataArray(length, true);
 }
 
 PerfDataList::PerfDataList(PerfDataList* p) {
 
-  _set = new(ResourceObj::C_HEAP) PerfDataArray(p->length(), true);
+  _set = new(ResourceObj::C_HEAP, mtInternal) PerfDataArray(p->length(), true);
 
   _set->appendAll(p->get_impl());
 }
--- a/hotspot/src/share/vm/runtime/perfData.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/perfData.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -240,7 +240,7 @@
  * be removed from the product in the future.
  *
  */
-class PerfData : public CHeapObj {
+class PerfData : public CHeapObj<mtInternal> {
 
   friend class StatSampler;      // for access to protected void sample()
   friend class PerfDataManager;  // for access to protected destructor
@@ -342,7 +342,7 @@
  * invoke the take_sample() method and write the value returned to its
  * appropriate location in the PerfData memory region.
  */
-class PerfLongSampleHelper : public CHeapObj {
+class PerfLongSampleHelper : public CHeapObj<mtInternal> {
   public:
     virtual jlong take_sample() = 0;
 };
@@ -591,7 +591,7 @@
  * some other implementation, as long as that implementation provides
  * a mechanism to iterate over the container by index.
  */
-class PerfDataList : public CHeapObj {
+class PerfDataList : public CHeapObj<mtInternal> {
 
   private:
 
--- a/hotspot/src/share/vm/runtime/perfMemory.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/perfMemory.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -112,7 +112,7 @@
       warning("Could not create PerfData Memory region, reverting to malloc");
     }
 
-    _prologue = NEW_C_HEAP_OBJ(PerfDataPrologue);
+    _prologue = NEW_C_HEAP_OBJ(PerfDataPrologue, mtInternal);
   }
   else {
 
@@ -244,10 +244,10 @@
   if (PerfDataSaveFile != NULL) {
     // dest_file_name stores the validated file name if file_name
     // contains %p which will be replaced by pid.
-    dest_file = NEW_C_HEAP_ARRAY(char, JVM_MAXPATHLEN);
+    dest_file = NEW_C_HEAP_ARRAY(char, JVM_MAXPATHLEN, mtInternal);
     if(!Arguments::copy_expand_pid(PerfDataSaveFile, strlen(PerfDataSaveFile),
                                    dest_file, JVM_MAXPATHLEN)) {
-      FREE_C_HEAP_ARRAY(char, dest_file);
+      FREE_C_HEAP_ARRAY(char, dest_file, mtInternal);
       if (PrintMiscellaneous && Verbose) {
         warning("Invalid performance data file path name specified, "\
                 "fall back to a default name");
@@ -257,7 +257,7 @@
     }
   }
   // create the name of the file for retaining the instrumentation memory.
-  dest_file = NEW_C_HEAP_ARRAY(char, PERFDATA_FILENAME_LEN);
+  dest_file = NEW_C_HEAP_ARRAY(char, PERFDATA_FILENAME_LEN, mtInternal);
   jio_snprintf(dest_file, PERFDATA_FILENAME_LEN,
                "%s_%d", PERFDATA_NAME, os::current_process_id());
 
--- a/hotspot/src/share/vm/runtime/reflectionUtils.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/reflectionUtils.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -59,7 +59,7 @@
 
 
 GrowableArray<FilteredField*> *FilteredFieldsMap::_filtered_fields =
-  new (ResourceObj::C_HEAP) GrowableArray<FilteredField*>(3,true);
+  new (ResourceObj::C_HEAP, mtInternal) GrowableArray<FilteredField*>(3,true);
 
 
 void FilteredFieldsMap::initialize() {
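
C-heap allocated GrowableArrays now pass a memory type through the two-argument ResourceObj placement new, as in the line above. A minimal sketch (mtInternal is illustrative):

  // Illustrative only: the second placement-new argument is the NMT memory type.
  GrowableArray<int>* ids =
      new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(8, true);
  ids->append(42);
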
--- a/hotspot/src/share/vm/runtime/safepoint.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -48,6 +48,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/sweeper.hpp"
 #include "runtime/synchronizer.hpp"
+#include "services/memTracker.hpp"
 #include "services/runtimeService.hpp"
 #include "utilities/events.hpp"
 #ifdef TARGET_ARCH_x86
@@ -546,6 +547,10 @@
   if (UseGCLogFileRotation) {
     gclog_or_tty->rotate_log();
   }
+
+  if (MemTracker::is_on()) {
+    MemTracker::sync();
+  }
 }
 
 
@@ -1157,7 +1162,7 @@
     stats_array_size = PrintSafepointStatisticsCount;
   }
   _safepoint_stats = (SafepointStats*)os::malloc(stats_array_size
-                                                 * sizeof(SafepointStats));
+                                                 * sizeof(SafepointStats), mtInternal);
   guarantee(_safepoint_stats != NULL,
             "not enough memory for safepoint instrumentation data");
 
--- a/hotspot/src/share/vm/runtime/safepoint.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/safepoint.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -190,7 +190,7 @@
 };
 
 // State class for a thread suspended at a safepoint
-class ThreadSafepointState: public CHeapObj {
+class ThreadSafepointState: public CHeapObj<mtInternal> {
  public:
   // These states are maintained by VM thread while threads are being brought
   // to a safepoint.  After SafepointSynchronize::end(), they are reset to
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -2117,7 +2117,7 @@
 
 // A simple wrapper class around the calling convention information
 // that allows sharing of adapters for the same calling convention.
-class AdapterFingerPrint : public CHeapObj {
+class AdapterFingerPrint : public CHeapObj<mtCode> {
  private:
   union {
     int  _compact[3];
@@ -2174,7 +2174,7 @@
       ptr = _value._compact;
     } else {
       _length = len;
-      _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length);
+      _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
       ptr = _value._fingerprint;
     }
 
@@ -2193,7 +2193,7 @@
 
   ~AdapterFingerPrint() {
     if (_length > 0) {
-      FREE_C_HEAP_ARRAY(int, _value._fingerprint);
+      FREE_C_HEAP_ARRAY(int, _value._fingerprint, mtCode);
     }
   }
 
@@ -2251,7 +2251,7 @@
 
 
 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
-class AdapterHandlerTable : public BasicHashtable {
+class AdapterHandlerTable : public BasicHashtable<mtCode> {
   friend class AdapterHandlerTableIterator;
 
  private:
@@ -2265,16 +2265,16 @@
 #endif
 
   AdapterHandlerEntry* bucket(int i) {
-    return (AdapterHandlerEntry*)BasicHashtable::bucket(i);
+    return (AdapterHandlerEntry*)BasicHashtable<mtCode>::bucket(i);
   }
 
  public:
   AdapterHandlerTable()
-    : BasicHashtable(293, sizeof(AdapterHandlerEntry)) { }
+    : BasicHashtable<mtCode>(293, sizeof(AdapterHandlerEntry)) { }
 
   // Create a new entry suitable for insertion in the table
   AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
-    AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable::new_entry(fingerprint->compute_hash());
+    AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable<mtCode>::new_entry(fingerprint->compute_hash());
     entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
     return entry;
   }
@@ -2287,7 +2287,7 @@
 
   void free_entry(AdapterHandlerEntry* entry) {
     entry->deallocate();
-    BasicHashtable::free_entry(entry);
+    BasicHashtable<mtCode>::free_entry(entry);
   }
 
   // Find a entry with the same fingerprint if it exists
@@ -2572,8 +2572,8 @@
 void AdapterHandlerEntry::deallocate() {
   delete _fingerprint;
 #ifdef ASSERT
-  if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
-  if (_saved_sig)  FREE_C_HEAP_ARRAY(Basictype, _saved_sig);
+  if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code, mtCode);
+  if (_saved_sig)  FREE_C_HEAP_ARRAY(Basictype, _saved_sig, mtCode);
 #endif
 }
 
@@ -2583,11 +2583,11 @@
 // against other versions.  If the code is captured after relocation
 // then relative instructions won't be equivalent.
 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
-  _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length);
+  _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
   _code_length = length;
   memcpy(_saved_code, buffer, length);
   _total_args_passed = total_args_passed;
-  _saved_sig = NEW_C_HEAP_ARRAY(BasicType, _total_args_passed);
+  _saved_sig = NEW_C_HEAP_ARRAY(BasicType, _total_args_passed, mtCode);
   memcpy(_saved_sig, sig_bt, _total_args_passed * sizeof(BasicType));
 }
 
@@ -2893,7 +2893,7 @@
   int max_locals = moop->max_locals();
   // Allocate temp buffer, 1 word per local & 2 per active monitor
   int buf_size_words = max_locals + active_monitor_count*2;
-  intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words);
+  intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words, mtCode);
 
   // Copy the locals.  Order is preserved so that loading of longs works.
   // Since there's no GC I can copy the oops blindly.
@@ -2923,7 +2923,7 @@
 JRT_END
 
 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
-  FREE_C_HEAP_ARRAY(intptr_t,buf);
+  FREE_C_HEAP_ARRAY(intptr_t,buf, mtCode);
 JRT_END
 
 bool AdapterHandlerLibrary::contains(CodeBlob* b) {
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -610,7 +610,7 @@
 // used by the adapters.  The code generation happens here because it's very
 // similar to what the adapters have to do.
 
-class AdapterHandlerEntry : public BasicHashtableEntry {
+class AdapterHandlerEntry : public BasicHashtableEntry<mtCode> {
   friend class AdapterHandlerTable;
 
  private:
@@ -656,7 +656,7 @@
   AdapterFingerPrint* fingerprint()  { return _fingerprint; }
 
   AdapterHandlerEntry* next() {
-    return (AdapterHandlerEntry*)BasicHashtableEntry::next();
+    return (AdapterHandlerEntry*)BasicHashtableEntry<mtCode>::next();
   }
 
 #ifdef ASSERT
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -36,7 +36,7 @@
 // Currently, code descriptors are simply chained in a linked list,
 // this may have to change if searching becomes too slow.
 
-class StubCodeDesc: public CHeapObj {
+class StubCodeDesc: public CHeapObj<mtCode> {
  protected:
   static StubCodeDesc* _list;                  // the list of all descriptors
   static int           _count;                 // length of list
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -228,7 +228,7 @@
 #ifdef ASSERT
     if (LogSweeper && _records == NULL) {
       // Create the ring buffer for the logging code
-      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries);
+      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
       memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
     }
 #endif
--- a/hotspot/src/share/vm/runtime/task.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/task.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -35,7 +35,7 @@
 //   ...
 //   pf.disenroll();
 
-class PeriodicTask: public CHeapObj {
+class PeriodicTask: public CHeapObj<mtInternal> {
  public:
   // Useful constants.
   // The interval constants are used to ensure the declared interval
--- a/hotspot/src/share/vm/runtime/thread.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -73,6 +73,7 @@
 #include "runtime/vm_operations.hpp"
 #include "services/attachListener.hpp"
 #include "services/management.hpp"
+#include "services/memTracker.hpp"
 #include "services/threadService.hpp"
 #include "trace/traceEventTypes.hpp"
 #include "utilities/defaultStream.hpp"
@@ -159,6 +160,7 @@
 
 #endif // ndef DTRACE_ENABLED
 
+
 // Class hierarchy
 // - Thread
 //   - VMThread
@@ -168,13 +170,13 @@
 //     - CompilerThread
 
 // ======= Thread ========
-
 // Support for forcing alignment of thread objects for biased locking
-void* Thread::operator new(size_t size) {
+void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) {
   if (UseBiasedLocking) {
     const int alignment = markOopDesc::biased_lock_alignment;
     size_t aligned_size = size + (alignment - sizeof(intptr_t));
-    void* real_malloc_addr = CHeapObj::operator new(aligned_size);
+    void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
+                                          : os::malloc(aligned_size, flags, CURRENT_PC);
     void* aligned_addr     = (void*) align_size_up((intptr_t) real_malloc_addr, alignment);
     assert(((uintptr_t) aligned_addr + (uintptr_t) size) <=
            ((uintptr_t) real_malloc_addr + (uintptr_t) aligned_size),
@@ -187,16 +189,17 @@
     ((Thread*) aligned_addr)->_real_malloc_address = real_malloc_addr;
     return aligned_addr;
   } else {
-    return CHeapObj::operator new(size);
+    return throw_excpt? AllocateHeap(size, flags, CURRENT_PC)
+                       : os::malloc(size, flags, CURRENT_PC);
   }
 }
 
 void Thread::operator delete(void* p) {
   if (UseBiasedLocking) {
     void* real_malloc_addr = ((Thread*) p)->_real_malloc_address;
-    CHeapObj::operator delete(real_malloc_addr);
+    FreeHeap(real_malloc_addr, mtThread);
   } else {
-    CHeapObj::operator delete(p);
+    FreeHeap(p, mtThread);
   }
 }
 
@@ -214,8 +217,8 @@
 
   // allocated data structures
   set_osthread(NULL);
-  set_resource_area(new ResourceArea());
-  set_handle_area(new HandleArea(NULL));
+  set_resource_area(new (mtThread) ResourceArea());
+  set_handle_area(new (mtThread) HandleArea(NULL));
   set_active_handles(NULL);
   set_free_handle_block(NULL);
   set_last_handle_mark(NULL);
@@ -306,12 +309,17 @@
 
   // set up any platform-specific state.
   os::initialize_thread();
-
 }
 
 void Thread::record_stack_base_and_size() {
   set_stack_base(os::current_stack_base());
   set_stack_size(os::current_stack_size());
+
+  // Record the thread's native stack; the stack grows downward.
+  address vm_base = _stack_base - _stack_size;
+  MemTracker::record_virtual_memory_reserve(vm_base, _stack_size,
+    CURRENT_PC, this);
+  MemTracker::record_virtual_memory_type(vm_base, mtThreadStack);
 }
 
 
@@ -319,6 +327,9 @@
   // Reclaim the objectmonitors from the omFreeList of the moribund thread.
   ObjectSynchronizer::omFlush (this) ;
 
+  MemTracker::record_virtual_memory_release((_stack_base - _stack_size),
+    _stack_size, this);
+
   // deallocate data structures
   delete resource_area();
   // since the handle marks are using the handle area, we have to deallocated the root
@@ -1105,14 +1116,14 @@
 
 NamedThread::~NamedThread() {
   if (_name != NULL) {
-    FREE_C_HEAP_ARRAY(char, _name);
+    FREE_C_HEAP_ARRAY(char, _name, mtThread);
     _name = NULL;
   }
 }
 
 void NamedThread::set_name(const char* format, ...) {
   guarantee(_name == NULL, "Only get to set name once.");
-  _name = NEW_C_HEAP_ARRAY(char, max_name_len);
+  _name = NEW_C_HEAP_ARRAY(char, max_name_len, mtThread);
   guarantee(_name != NULL, "alloc failure");
   va_list ap;
   va_start(ap, format);
@@ -1295,6 +1306,7 @@
   set_monitor_chunks(NULL);
   set_next(NULL);
   set_thread_state(_thread_new);
+  set_recorder(NULL);
   _terminated = _not_terminated;
   _privileged_stack_top = NULL;
   _array_for_gc = NULL;
@@ -1370,6 +1382,7 @@
     _jni_attach_state = _not_attaching_via_jni;
   }
   assert(_deferred_card_mark.is_empty(), "Default MemRegion ctor");
+  _safepoint_visible = false;
 }
 
 bool JavaThread::reguard_stack(address cur_sp) {
@@ -1432,7 +1445,7 @@
   thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread :
                                                      os::java_thread;
   os::create_thread(this, thr_type, stack_sz);
-
+  _safepoint_visible = false;
   // The _osthread may be NULL here because we ran out of memory (too many threads active).
   // We need to throw and OutOfMemoryError - however we cannot do this here because the caller
   // may hold a lock and all locks must be unlocked before throwing the exception (throwing
@@ -1450,6 +1463,11 @@
       tty->print_cr("terminate thread %p", this);
   }
 
+  // Inform NMT that this JavaThread is exiting, so its memory
+  // recorder can be collected
+  assert(!is_safepoint_visible(), "wrong state");
+  MemTracker::thread_exiting(this);
+
   // JSR166 -- return the parker to the free list
   Parker::Release(_parker);
   _parker = NULL ;
@@ -2892,7 +2910,7 @@
 void JavaThread::popframe_preserve_args(ByteSize size_in_bytes, void* start) {
   assert(_popframe_preserved_args == NULL, "should not wipe out old PopFrame preserved arguments");
   if (in_bytes(size_in_bytes) != 0) {
-    _popframe_preserved_args = NEW_C_HEAP_ARRAY(char, in_bytes(size_in_bytes));
+    _popframe_preserved_args = NEW_C_HEAP_ARRAY(char, in_bytes(size_in_bytes), mtThread);
     _popframe_preserved_args_size = in_bytes(size_in_bytes);
     Copy::conjoint_jbytes(start, _popframe_preserved_args, _popframe_preserved_args_size);
   }
@@ -2914,7 +2932,7 @@
 
 void JavaThread::popframe_free_preserved_args() {
   assert(_popframe_preserved_args != NULL, "should not free PopFrame preserved arguments twice");
-  FREE_C_HEAP_ARRAY(char, (char*) _popframe_preserved_args);
+  FREE_C_HEAP_ARRAY(char, (char*) _popframe_preserved_args, mtThread);
   _popframe_preserved_args = NULL;
   _popframe_preserved_args_size = 0;
 }
@@ -3163,6 +3181,14 @@
   jint os_init_2_result = os::init_2();
   if (os_init_2_result != JNI_OK) return os_init_2_result;
 
+  // Initialize TLS
+  ThreadLocalStorage::init();
+
+  // Bootstrap native memory tracking, so it can start recording memory
+  // activities before worker threads are started. This is the first phase
+  // of bootstrapping; the VM is still running in single-thread mode.
+  MemTracker::bootstrap_single_thread();
+
   // Initialize output stream logging
   ostream_init_log();
 
@@ -3182,9 +3208,6 @@
   _number_of_threads = 0;
   _number_of_non_daemon_threads = 0;
 
-  // Initialize TLS
-  ThreadLocalStorage::init();
-
   // Initialize global data structures and create system classes in heap
   vm_init_globals();
 
@@ -3216,6 +3239,9 @@
   // Initialize Java-Level synchronization subsystem
   ObjectMonitor::Initialize() ;
 
+  // Second phase of bootstrapping: the VM is about to enter multi-thread mode
+  MemTracker::bootstrap_multi_thread();
+
   // Initialize global modules
   jint status = init_globals();
   if (status != JNI_OK) {
@@ -3243,6 +3269,9 @@
     Universe::verify();   // make sure we're starting with a clean slate
   }
 
+  // Fully start NMT
+  MemTracker::start();
+
   // Create the VMThread
   { TraceTime timer("Start VMThread", TraceStartupTime);
     VMThread::create();
@@ -3544,11 +3573,11 @@
       if (library == NULL) {
         const char *sub_msg = " in absolute path, with error: ";
         size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) + strlen(ebuf) + 1;
-        char *buf = NEW_C_HEAP_ARRAY(char, len);
+        char *buf = NEW_C_HEAP_ARRAY(char, len, mtThread);
         jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
         // If we can't find the agent, exit.
         vm_exit_during_initialization(buf, NULL);
-        FREE_C_HEAP_ARRAY(char, buf);
+        FREE_C_HEAP_ARRAY(char, buf, mtThread);
       }
     } else {
       // Try to load the agent from the standard dll directory
@@ -3562,7 +3591,7 @@
         const char *fmt   = "%s/bin/java %s -Dkernel.background.download=false"
                       " sun.jkernel.DownloadManager -download client_jvm";
         size_t length = strlen(props) + strlen(home) + strlen(fmt) + 1;
-        char *cmd = NEW_C_HEAP_ARRAY(char, length);
+        char *cmd = NEW_C_HEAP_ARRAY(char, length, mtThread);
         jio_snprintf(cmd, length, fmt, home, props);
         int status = os::fork_and_exec(cmd);
         FreeHeap(props);
@@ -3571,7 +3600,7 @@
           vm_exit_during_initialization("fork_and_exec failed: %s",
                                          strerror(errno));
         }
-        FREE_C_HEAP_ARRAY(char, cmd);
+        FREE_C_HEAP_ARRAY(char, cmd, mtThread);
         // when this comes back the instrument.dll should be where it belongs.
         library = os::dll_load(buffer, ebuf, sizeof ebuf);
       }
@@ -3583,11 +3612,11 @@
         if (library == NULL) {
           const char *sub_msg = " on the library path, with error: ";
           size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) + strlen(ebuf) + 1;
-          char *buf = NEW_C_HEAP_ARRAY(char, len);
+          char *buf = NEW_C_HEAP_ARRAY(char, len, mtThread);
           jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
           // If we can't find the agent, exit.
           vm_exit_during_initialization(buf, NULL);
-          FREE_C_HEAP_ARRAY(char, buf);
+          FREE_C_HEAP_ARRAY(char, buf, mtThread);
         }
       }
     }
@@ -3756,6 +3785,7 @@
 // and VM_Exit op at VM level.
 //
 // Shutdown sequence:
+//   + Shutdown native memory tracking if it is on
 //   + Wait until we are the last non-daemon thread to execute
 //     <-- every thing is still working at this moment -->
 //   + Call java.lang.Shutdown.shutdown(), which will invoke Java level
@@ -3801,6 +3831,10 @@
                          Mutex::_as_suspend_equivalent_flag);
   }
 
+  // Shut down NMT before exit. Otherwise,
+  // it will run into trouble when the system destroys static variables.
+  MemTracker::shutdown(MemTracker::NMT_normal);
+
   // Hang forever on exit if we are reporting an error.
   if (ShowMessageBoxOnError && is_error_reported()) {
     os::infinite_sleep();
@@ -3907,6 +3941,8 @@
     daemon = false;
   }
 
+  p->set_safepoint_visible(true);
+
   ThreadService::add_thread(p, daemon);
 
   // Possible GC point.
@@ -3952,6 +3988,10 @@
     // to do callbacks into the safepoint code. However, the safepoint code is not aware
     // of this thread since it is removed from the queue.
     p->set_terminated_value();
+
+    // Now, this thread is not visible to safepoint
+    p->set_safepoint_visible(false);
+
   } // unlock Threads_lock
 
   // Since Events::log uses a lock, we grab it outside the Threads_lock
--- a/hotspot/src/share/vm/runtime/thread.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -41,6 +41,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/threadLocalStorage.hpp"
 #include "runtime/unhandledOops.hpp"
+#include "services/memRecorder.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/top.hpp"
@@ -100,12 +101,16 @@
   //oop       _pending_exception;                // pending exception for current thread
   // const char* _exception_file;                   // file information for exception (debugging only)
   // int         _exception_line;                   // line information for exception (debugging only)
-
+ protected:
   // Support for forcing alignment of thread objects for biased locking
   void*       _real_malloc_address;
  public:
-  void* operator new(size_t size);
+  void* operator new(size_t size) { return allocate(size, true); }
+  void* operator new(size_t size, std::nothrow_t& nothrow_constant) { return allocate(size, false); }
   void  operator delete(void* p);
+
+ protected:
+   static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
  private:
 
   // ***************************************************************
@@ -548,7 +553,6 @@
   virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
 
   // Debug-only code
-
 #ifdef ASSERT
  private:
   // Deadlock detection support for Mutex locks. List of locks own by thread.
@@ -1027,9 +1031,15 @@
   bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
 
+  // native memory tracking
+  inline MemRecorder* get_recorder() const          { return (MemRecorder*)_recorder; }
+  inline void         set_recorder(MemRecorder* rc) { _recorder = (volatile MemRecorder*)rc; }
+
+ private:
+  // per-thread memory recorder
+  volatile MemRecorder* _recorder;
 
   // Suspend/resume support for JavaThread
-
  private:
   void set_ext_suspended()       { set_suspend_flag (_ext_suspended);  }
   void clear_ext_suspended()     { clear_suspend_flag(_ext_suspended); }
@@ -1453,6 +1463,18 @@
      return result;
    }
 
+ // NMT (Native Memory Tracking) support.
+ // This flag helps NMT determine whether this JavaThread will be blocked
+ // at a safepoint. If not, ThreadCritical is needed for writing memory records.
+ // A JavaThread is only safepoint visible while it is on the Threads' thread
+ // list: it does not become visible until it is added to the list, and it
+ // becomes invisible once it is removed from the list.
+ public:
+  bool is_safepoint_visible() const { return _safepoint_visible; }
+  void set_safepoint_visible(bool visible) { _safepoint_visible = visible; }
+ private:
+  bool _safepoint_visible;
+
   // Static operations
  public:
   // Returns the running thread as a JavaThread
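
A hedged sketch of how an NMT record writer might consult the new visibility flag, consistent with the comment above; the helper name and the ThreadCritical guard are illustrative, not taken from this patch:

  // Illustrative only.
  void write_record_for(JavaThread* jt) {
    if (jt->is_safepoint_visible()) {
      // thread will be held at safepoints, so its recorder can be written
      // without a global lock
    } else {
      ThreadCritical tc;   // assumed global critical-section guard
      // write the record while holding the critical section
    }
  }
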
--- a/hotspot/src/share/vm/runtime/unhandledOops.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/unhandledOops.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -37,7 +37,7 @@
 
 UnhandledOops::UnhandledOops(Thread* thread) {
   _thread = thread;
-  _oop_list = new (ResourceObj::C_HEAP)
+  _oop_list = new (ResourceObj::C_HEAP, mtInternal)
                     GrowableArray<UnhandledOopEntry>(free_list_size, true);
   _level = 0;
 }
--- a/hotspot/src/share/vm/runtime/vframeArray.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/vframeArray.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -443,7 +443,7 @@
   // Allocate the vframeArray
   vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
                                                      sizeof(vframeArrayElement) * (chunk->length() - 1), // variable part
-                                                     "vframeArray::allocate");
+                                                     mtCompiler);
   result->_frames = chunk->length();
   result->_owner_thread = thread;
   result->_sender = sender;
--- a/hotspot/src/share/vm/runtime/vframeArray.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/vframeArray.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -108,7 +108,7 @@
 // but it does make debugging easier even if we can't look
 // at the data in each vframeElement
 
-class vframeArray: public CHeapObj {
+class vframeArray: public CHeapObj<mtCompiler> {
   friend class VMStructs;
 
  private:
--- a/hotspot/src/share/vm/runtime/vframe_hp.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/vframe_hp.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -154,7 +154,7 @@
   } else {
     // No deferred updates pending for this thread.
     // allocate in C heap
-    deferred =  new(ResourceObj::C_HEAP) GrowableArray<jvmtiDeferredLocalVariableSet*> (1, true);
+    deferred =  new(ResourceObj::C_HEAP, mtCompiler) GrowableArray<jvmtiDeferredLocalVariableSet*> (1, true);
     thread()->set_deferred_locals(deferred);
   }
   deferred->push(new jvmtiDeferredLocalVariableSet(method(), bci(), fr().id()));
@@ -323,7 +323,7 @@
   _bci = bci;
   _id = id;
   // Alway will need at least one, must be on C heap
-  _locals = new(ResourceObj::C_HEAP) GrowableArray<jvmtiDeferredLocalVariable*> (1, true);
+  _locals = new(ResourceObj::C_HEAP, mtCompiler) GrowableArray<jvmtiDeferredLocalVariable*> (1, true);
 }
 
 jvmtiDeferredLocalVariableSet::~jvmtiDeferredLocalVariableSet() {
--- a/hotspot/src/share/vm/runtime/vframe_hp.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/vframe_hp.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -89,7 +89,7 @@
 // any updated locals.
 
 class jvmtiDeferredLocalVariable;
-class jvmtiDeferredLocalVariableSet : public CHeapObj {
+class jvmtiDeferredLocalVariableSet : public CHeapObj<mtCompiler> {
 private:
 
   methodOop _method;           // must be GC'd
@@ -119,7 +119,7 @@
 
 };
 
-class jvmtiDeferredLocalVariable : public CHeapObj {
+class jvmtiDeferredLocalVariable : public CHeapObj<mtCompiler> {
   public:
 
     jvmtiDeferredLocalVariable(int index, BasicType type, jvalue value);
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -26,6 +26,7 @@
 #include "oops/markOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/virtualspace.hpp"
+#include "services/memTracker.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
 #endif
@@ -489,6 +490,10 @@
                 (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                  Universe::narrow_oop_use_implicit_null_checks()) ?
                   lcm(os::vm_page_size(), alignment) : 0) {
+  if (base() > 0) {
+    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
+  }
+
   // Only reserved space for the java heap should have a noaccess_prefix
   // if using compressed oops.
   protect_noaccess_prefix(size);
@@ -504,6 +509,10 @@
                 (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                  Universe::narrow_oop_use_implicit_null_checks()) ?
                   lcm(os::vm_page_size(), prefix_align) : 0) {
+  if (base() > 0) {
+    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
+  }
+
   protect_noaccess_prefix(prefix_size+suffix_size);
 }
 
@@ -513,6 +522,7 @@
                                      size_t rs_align,
                                      bool large) :
   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
+  MemTracker::record_virtual_memory_type((address)base(), mtCode);
 }
 
 // VirtualSpace
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -235,7 +235,6 @@
 #ifndef REG_COUNT
   #define REG_COUNT 0
 #endif
-
 // whole purpose of this function is to work around bug c++/27724 in gcc 4.1.1
 // with optimization turned on it doesn't affect produced code
 static inline uint64_t cast_uint64_t(size_t x)
@@ -244,6 +243,16 @@
 }
 
 
+typedef HashtableEntry<intptr_t, mtInternal>  IntptrHashtableEntry;
+typedef Hashtable<intptr_t, mtInternal>       IntptrHashtable;
+typedef Hashtable<Symbol*, mtSymbol>          SymbolHashtable;
+typedef HashtableEntry<Symbol*, mtClass>      SymbolHashtableEntry;
+typedef Hashtable<oop, mtSymbol>              StringHashtable;
+typedef TwoOopHashtable<klassOop, mtClass>    klassOopTwoOopHashtable;
+typedef Hashtable<klassOop, mtClass>          klassOopHashtable;
+typedef HashtableEntry<klassOop, mtClass>     klassHashtableEntry;
+typedef TwoOopHashtable<Symbol*, mtClass>     SymbolTwoOopHashtable;
+
 //--------------------------------------------------------------------------------
 // VM_STRUCTS
 //
@@ -711,26 +720,26 @@
   /* HashtableBucket */                                                                                                              \
   /*******************/                                                                                                              \
                                                                                                                                      \
-  nonstatic_field(HashtableBucket,             _entry,                                        BasicHashtableEntry*)                  \
+  nonstatic_field(HashtableBucket<mtInternal>,  _entry,                                        BasicHashtableEntry<mtInternal>*)     \
                                                                                                                                      \
   /******************/                                                                                                               \
   /* HashtableEntry */                                                                                                               \
   /******************/                                                                                                               \
                                                                                                                                      \
-  nonstatic_field(BasicHashtableEntry,         _next,                                         BasicHashtableEntry*)                  \
-  nonstatic_field(BasicHashtableEntry,         _hash,                                         unsigned int)                          \
-  nonstatic_field(HashtableEntry<intptr_t>,    _literal,                                      intptr_t) \
+  nonstatic_field(BasicHashtableEntry<mtInternal>, _next,                                     BasicHashtableEntry<mtInternal>*)      \
+  nonstatic_field(BasicHashtableEntry<mtInternal>, _hash,                                     unsigned int)                          \
+  nonstatic_field(IntptrHashtableEntry,            _literal,                                  intptr_t)                              \
                                                                                                                                      \
   /*************/                                                                                                                    \
   /* Hashtable */                                                                                                                    \
   /*************/                                                                                                                    \
                                                                                                                                      \
-  nonstatic_field(BasicHashtable,              _table_size,                                   int)                                   \
-  nonstatic_field(BasicHashtable,              _buckets,                                      HashtableBucket*)                      \
-  nonstatic_field(BasicHashtable,              _free_list,                                    BasicHashtableEntry*)                  \
-  nonstatic_field(BasicHashtable,              _first_free_entry,                             char*)                                 \
-  nonstatic_field(BasicHashtable,              _end_block,                                    char*)                                 \
-  nonstatic_field(BasicHashtable,              _entry_size,                                   int)                                   \
+  nonstatic_field(BasicHashtable<mtInternal>, _table_size,                                   int)                                   \
+  nonstatic_field(BasicHashtable<mtInternal>, _buckets,                                      HashtableBucket<mtInternal>*)          \
+  nonstatic_field(BasicHashtable<mtInternal>, _free_list,                                    BasicHashtableEntry<mtInternal>*)      \
+  nonstatic_field(BasicHashtable<mtInternal>, _first_free_entry,                             char*)                                 \
+  nonstatic_field(BasicHashtable<mtInternal>, _end_block,                                    char*)                                 \
+  nonstatic_field(BasicHashtable<mtInternal>, _entry_size,                                   int)                                   \
                                                                                                                                      \
   /*******************/                                                                                                              \
   /* DictionaryEntry */                                                                                                              \
@@ -1538,20 +1547,20 @@
   /* SymbolTable, SystemDictionary */                                     \
   /*********************************/                                     \
                                                                           \
-  declare_toplevel_type(BasicHashtable)                                   \
-    declare_type(Hashtable<intptr_t>, BasicHashtable)                     \
-  declare_type(SymbolTable, Hashtable<Symbol*>)                           \
-  declare_type(StringTable, Hashtable<oop>)                               \
-    declare_type(LoaderConstraintTable, Hashtable<klassOop>)              \
-    declare_type(TwoOopHashtable<klassOop>, Hashtable<klassOop>)          \
-    declare_type(Dictionary, TwoOopHashtable<klassOop>)                   \
-    declare_type(PlaceholderTable, TwoOopHashtable<Symbol*>)              \
-  declare_toplevel_type(BasicHashtableEntry)                              \
-  declare_type(HashtableEntry<intptr_t>, BasicHashtableEntry)             \
-    declare_type(DictionaryEntry, HashtableEntry<klassOop>)               \
-    declare_type(PlaceholderEntry, HashtableEntry<Symbol*>)               \
-    declare_type(LoaderConstraintEntry, HashtableEntry<klassOop>)         \
-  declare_toplevel_type(HashtableBucket)                                  \
+  declare_toplevel_type(BasicHashtable<mtInternal>)                       \
+    declare_type(IntptrHashtable, BasicHashtable<mtInternal>)             \
+  declare_type(SymbolTable, SymbolHashtable)                              \
+  declare_type(StringTable, StringHashtable)                              \
+    declare_type(LoaderConstraintTable, klassOopHashtable)                \
+    declare_type(klassOopTwoOopHashtable, klassOopHashtable)              \
+    declare_type(Dictionary, klassOopTwoOopHashtable)                     \
+    declare_type(PlaceholderTable, SymbolTwoOopHashtable)                 \
+  declare_toplevel_type(BasicHashtableEntry<mtInternal>)                  \
+  declare_type(IntptrHashtableEntry, BasicHashtableEntry<mtInternal>)     \
+    declare_type(DictionaryEntry, klassHashtableEntry)                    \
+    declare_type(PlaceholderEntry, SymbolHashtableEntry)                  \
+    declare_type(LoaderConstraintEntry, klassHashtableEntry)              \
+  declare_toplevel_type(HashtableBucket<mtInternal>)                      \
   declare_toplevel_type(SystemDictionary)                                 \
   declare_toplevel_type(vmSymbols)                                        \
   declare_toplevel_type(ProtectionDomainEntry)                            \
--- a/hotspot/src/share/vm/runtime/vmThread.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/vmThread.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -46,7 +46,7 @@
 // Encapsulates both queue management and
 // and priority policy
 //
-class VMOperationQueue : public CHeapObj {
+class VMOperationQueue : public CHeapObj<mtInternal> {
  private:
   enum Priorities {
      SafepointPriority, // Highest priority (operation executed at a safepoint)
--- a/hotspot/src/share/vm/runtime/vm_operations.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -96,7 +96,7 @@
   template(JFRCheckpoint)                         \
   template(Exit)                                  \
 
-class VM_Operation: public CHeapObj {
+class VM_Operation: public CHeapObj<mtInternal> {
  public:
   enum Mode {
     _safepoint,       // blocking,        safepoint, vm_op C-heap allocated
--- a/hotspot/src/share/vm/services/attachListener.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/attachListener.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -320,7 +320,7 @@
   }
   bool res = CommandLineFlags::ccstrAtPut((char*)name, &value, ATTACH_ON_DEMAND);
   if (res) {
-    FREE_C_HEAP_ARRAY(char, value);
+    FREE_C_HEAP_ARRAY(char, value, mtInternal);
   } else {
     out->print_cr("setting flag %s failed", name);
   }
--- a/hotspot/src/share/vm/services/attachListener.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/attachListener.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -98,7 +98,7 @@
 };
 
 #ifndef SERVICES_KERNEL
-class AttachOperation: public CHeapObj {
+class AttachOperation: public CHeapObj<mtInternal> {
  public:
   enum {
     name_length_max = 16,       // maximum length of  name
--- a/hotspot/src/share/vm/services/diagnosticArgument.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/diagnosticArgument.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -140,7 +140,7 @@
   if (str == NULL) {
     _value = NULL;
   } else {
-    _value = NEW_C_HEAP_ARRAY(char, len+1);
+    _value = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
     strncpy(_value, str, len);
     _value[len] = 0;
   }
@@ -159,7 +159,7 @@
 
 template <> void DCmdArgument<char*>::destroy_value() {
   if (_value != NULL) {
-    FREE_C_HEAP_ARRAY(char, _value);
+    FREE_C_HEAP_ARRAY(char, _value, mtInternal);
     set_value(NULL);
   }
 }
--- a/hotspot/src/share/vm/services/diagnosticArgument.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/diagnosticArgument.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -31,17 +31,17 @@
 #include "runtime/thread.hpp"
 #include "utilities/exceptions.hpp"
 
-class StringArrayArgument : public CHeapObj {
+class StringArrayArgument : public CHeapObj<mtInternal> {
 private:
   GrowableArray<char*>* _array;
 public:
   StringArrayArgument() {
-    _array = new(ResourceObj::C_HEAP)GrowableArray<char *>(32, true);
+    _array = new(ResourceObj::C_HEAP, mtInternal)GrowableArray<char *>(32, true);
     assert(_array != NULL, "Sanity check");
   }
   void add(const char* str, size_t len) {
     if (str != NULL) {
-      char* ptr = NEW_C_HEAP_ARRAY(char, len+1);
+      char* ptr = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
       strncpy(ptr, str, len);
       ptr[len] = 0;
       _array->append(ptr);
@@ -53,7 +53,7 @@
   ~StringArrayArgument() {
     for (int i=0; i<_array->length(); i++) {
       if(_array->at(i) != NULL) { // Safety check
-        FREE_C_HEAP_ARRAY(char, _array->at(i));
+        FREE_C_HEAP_ARRAY(char, _array->at(i), mtInternal);
       }
     }
     delete _array;
--- a/hotspot/src/share/vm/services/diagnosticFramework.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/diagnosticFramework.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -310,7 +310,7 @@
 // manages the status of the diagnostic command (hidden, enabled). A DCmdFactory
 // has to be registered to make the diagnostic command available (see
 // management.cpp)
-class DCmdFactory: public CHeapObj {
+class DCmdFactory: public CHeapObj<mtInternal> {
 private:
   static Mutex*       _dcmdFactory_lock;
   // Pointer to the next factory in the singly-linked list of registered
@@ -368,7 +368,7 @@
     DCmdFactory(DCmdClass::num_arguments(), enabled, hidden) { }
   // Returns a C-heap allocated instance
   virtual DCmd* create_Cheap_instance(outputStream* output) {
-    return new (ResourceObj::C_HEAP) DCmdClass(output, true);
+    return new (ResourceObj::C_HEAP, mtInternal) DCmdClass(output, true);
   }
   // Returns a resourceArea allocated instance
   virtual DCmd* create_resource_instance(outputStream* output) {
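
The changes above show the core phase-1 pattern: every C-heap allocation now carries an explicit memory-type tag (mtInternal, mtGC, ...), whether it goes through CHeapObj, NEW_C_HEAP_ARRAY, or placement new on ResourceObj::C_HEAP, so NMT can charge the bytes to a category. A minimal standalone sketch of how such type-tagged allocation can be structured; the names and the bookkeeping here are illustrative only, not the HotSpot implementation:

    // Minimal sketch (not the HotSpot code): a base class templatized on a
    // memory-type tag, so every allocation/free is charged to that type.
    #include <cstdio>
    #include <cstdlib>
    #include <new>

    enum MemTag { TagInternal, TagGC, TagNMT, TagCount };

    static size_t g_allocated[TagCount] = {0};   // bytes currently charged per tag

    template <MemTag TAG>
    class TaggedHeapObj {
     public:
      void* operator new(size_t size) {
        void* p = std::malloc(size + sizeof(size_t));
        if (p == NULL) throw std::bad_alloc();
        *(size_t*)p = size;                      // remember size for the free side
        g_allocated[TAG] += size;
        return (char*)p + sizeof(size_t);
      }
      void operator delete(void* p) {
        if (p == NULL) return;
        char* raw = (char*)p - sizeof(size_t);
        g_allocated[TAG] -= *(size_t*)raw;
        std::free(raw);
      }
    };

    // analogous to "class VMOperationQueue : public CHeapObj<mtInternal>"
    class Queue : public TaggedHeapObj<TagInternal> { public: int value; };

    int main() {
      Queue* q = new Queue();
      std::printf("internal bytes: %zu\n", g_allocated[TagInternal]);
      delete q;
      return 0;
    }

The key design point is that the tag is a compile-time template argument, so existing allocation sites only change their base class or macro argument rather than threading a runtime parameter through every call.
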
--- a/hotspot/src/share/vm/services/gcNotifier.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/gcNotifier.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -45,7 +45,7 @@
   // GC may occur between now and the creation of the notification
   int num_pools = MemoryService::num_memory_pools();
   // stat is deallocated inside GCNotificationRequest
-  GCStatInfo* stat = new(ResourceObj::C_HEAP) GCStatInfo(num_pools);
+  GCStatInfo* stat = new(ResourceObj::C_HEAP, mtGC) GCStatInfo(num_pools);
   mgr->get_last_gc_stat(stat);
   GCNotificationRequest *request = new GCNotificationRequest(os::javaTimeMillis(),mgr,action,cause,stat);
   addRequest(request);
--- a/hotspot/src/share/vm/services/gcNotifier.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/gcNotifier.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -30,7 +30,7 @@
 #include "services/memoryService.hpp"
 #include "services/memoryManager.hpp"
 
-class GCNotificationRequest : public CHeapObj {
+class GCNotificationRequest : public CHeapObj<mtInternal> {
   friend class GCNotifier;
   GCNotificationRequest *next;
   jlong timestamp;
--- a/hotspot/src/share/vm/services/heapDumper.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/heapDumper.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -436,7 +436,7 @@
   // sufficient memory then reduce size until we can allocate something.
   _size = io_buffer_size;
   do {
-    _buffer = (char*)os::malloc(_size);
+    _buffer = (char*)os::malloc(_size, mtInternal);
     if (_buffer == NULL) {
       _size = _size >> 1;
     }
@@ -1405,7 +1405,7 @@
     _gc_before_heap_dump = gc_before_heap_dump;
     _is_segmented_dump = false;
     _dump_start = (jlong)-1;
-    _klass_map = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, true);
+    _klass_map = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, true);
     _stack_traces = NULL;
     _num_threads = 0;
     if (oome) {
@@ -1426,7 +1426,7 @@
       for (int i=0; i < _num_threads; i++) {
         delete _stack_traces[i];
       }
-      FREE_C_HEAP_ARRAY(ThreadStackTrace*, _stack_traces);
+      FREE_C_HEAP_ARRAY(ThreadStackTrace*, _stack_traces, mtInternal);
     }
     delete _klass_map;
   }
@@ -1806,7 +1806,7 @@
   writer()->write_u4(0);                    // thread number
   writer()->write_u4(0);                    // frame count
 
-  _stack_traces = NEW_C_HEAP_ARRAY(ThreadStackTrace*, Threads::number_of_threads());
+  _stack_traces = NEW_C_HEAP_ARRAY(ThreadStackTrace*, Threads::number_of_threads(), mtInternal);
   int frame_serial_num = 0;
   for (JavaThread* thread = Threads::first(); thread != NULL ; thread = thread->next()) {
     oop threadObj = thread->threadObj();
@@ -2005,7 +2005,7 @@
                    dump_file_name, os::current_process_id(), dump_file_ext);
     }
     const size_t len = strlen(base_path) + 1;
-    my_path = (char*)os::malloc(len);
+    my_path = (char*)os::malloc(len, mtInternal);
     if (my_path == NULL) {
       warning("Cannot create heap dump file.  Out of system memory.");
       return;
@@ -2014,7 +2014,7 @@
   } else {
     // Append a sequence number id for dumps following the first
     const size_t len = strlen(base_path) + max_digit_chars + 2; // for '.' and \0
-    my_path = (char*)os::malloc(len);
+    my_path = (char*)os::malloc(len, mtInternal);
     if (my_path == NULL) {
       warning("Cannot create heap dump file.  Out of system memory.");
       return;
--- a/hotspot/src/share/vm/services/lowMemoryDetector.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/lowMemoryDetector.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -63,7 +63,7 @@
 class OopClosure;
 class MemoryPool;
 
-class ThresholdSupport : public CHeapObj {
+class ThresholdSupport : public CHeapObj<mtInternal> {
  private:
   bool            _support_high_threshold;
   bool            _support_low_threshold;
@@ -112,7 +112,7 @@
   }
 };
 
-class SensorInfo : public CHeapObj {
+class SensorInfo : public CHeapObj<mtInternal> {
 private:
   instanceOop     _sensor_obj;
   bool            _sensor_on;
--- a/hotspot/src/share/vm/services/management.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/management.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -47,6 +47,7 @@
 #include "services/jmm.h"
 #include "services/lowMemoryDetector.hpp"
 #include "services/gcNotifier.hpp"
+#include "services/nmtDCmd.hpp"
 #include "services/management.hpp"
 #include "services/memoryManager.hpp"
 #include "services/memoryPool.hpp"
@@ -121,6 +122,7 @@
   // Registration of the diagnostic commands
   DCmdRegistrant::register_dcmds();
   DCmdRegistrant::register_dcmds_ext();
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<NMTDCmd>(true, false));
 }
 
 void Management::initialize(TRAPS) {
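
The management.cpp change wires the new NMT diagnostic command into the existing factory registry: a templatized factory is instantiated for the command class and handed to the registrar. A rough standalone sketch of that registration pattern, with hypothetical names and an illustrative command string:

    // Illustrative sketch of the factory-registration pattern (not the
    // HotSpot DCmd framework).
    #include <cstdio>
    #include <map>
    #include <string>

    class Command {
     public:
      virtual ~Command() {}
      virtual void execute() = 0;
    };

    class CommandFactory {
     public:
      virtual ~CommandFactory() {}
      virtual Command* create() = 0;
    };

    template <class C>
    class CommandFactoryImpl : public CommandFactory {
     public:
      virtual Command* create() { return new C(); }
    };

    static std::map<std::string, CommandFactory*>& registry() {
      static std::map<std::string, CommandFactory*> r;
      return r;
    }

    static void register_factory(const std::string& name, CommandFactory* f) {
      registry()[name] = f;
    }

    class NativeMemoryCmd : public Command {
     public:
      virtual void execute() { std::printf("report native memory usage\n"); }
    };

    int main() {
      // mirrors: DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<NMTDCmd>(true, false))
      register_factory("native_memory", new CommandFactoryImpl<NativeMemoryCmd>());
      Command* c = registry()["native_memory"]->create();
      c->execute();
      delete c;
      return 0;
    }
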
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memBaseline.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "memory/allocation.hpp"
+#include "services/memBaseline.hpp"
+#include "services/memTracker.hpp"
+
+MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
+  {mtJavaHeap,   "Java Heap"},
+  {mtClass,      "Class"},
+  {mtThreadStack,"Thread Stack"},
+  {mtThread,     "Thread"},
+  {mtCode,       "Code"},
+  {mtGC,         "GC"},
+  {mtCompiler,   "Compiler"},
+  {mtInternal,   "Internal"},
+  {mtOther,      "Other"},
+  {mtSymbol,     "Symbol"},
+  {mtNMT,        "Memory Tracking"},
+  {mtChunk,      "Pooled Free Chunks"},
+  {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
+                             // behind
+};
+
+MemBaseline::MemBaseline() {
+  _baselined = false;
+
+  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
+    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
+    _vm_data[index].set_type(MemType2NameMap[index]._flag);
+    _arena_data[index].set_type(MemType2NameMap[index]._flag);
+  }
+
+  _malloc_cs = NULL;
+  _vm_cs = NULL;
+
+  _number_of_classes = 0;
+  _number_of_threads = 0;
+}
+
+
+void MemBaseline::clear() {
+  if (_malloc_cs != NULL) {
+    delete _malloc_cs;
+    _malloc_cs = NULL;
+  }
+
+  if (_vm_cs != NULL) {
+    delete _vm_cs;
+    _vm_cs = NULL;
+  }
+
+  reset();
+}
+
+
+void MemBaseline::reset() {
+  _baselined = false;
+  _total_vm_reserved = 0;
+  _total_vm_committed = 0;
+  _total_malloced = 0;
+  _number_of_classes = 0;
+
+  if (_malloc_cs != NULL) _malloc_cs->clear();
+  if (_vm_cs != NULL) _vm_cs->clear();
+
+  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
+    _malloc_data[index].clear();
+    _vm_data[index].clear();
+    _arena_data[index].clear();
+  }
+}
+
+MemBaseline::~MemBaseline() {
+  if (_malloc_cs != NULL) {
+    delete _malloc_cs;
+  }
+
+  if (_vm_cs != NULL) {
+    delete _vm_cs;
+  }
+}
+
+// baseline malloc'd memory records, generate overall summary and summaries by
+// memory types
+bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
+  MemPointerArrayIteratorImpl mItr((MemPointerArray*)malloc_records);
+  MemPointerRecord* mptr = (MemPointerRecord*)mItr.current();
+  size_t used_arena_size = 0;
+  int index;
+  while (mptr != NULL) {
+    index = flag2index(FLAGS_TO_MEMORY_TYPE(mptr->flags()));
+    size_t size = mptr->size();
+    _total_malloced += size;
+    _malloc_data[index].inc(size);
+    if (MemPointerRecord::is_arena_record(mptr->flags())) {
+      // see if arena size record present
+      MemPointerRecord* next_p = (MemPointerRecordEx*)mItr.peek_next();
+      if (next_p != NULL && MemPointerRecord::is_arena_size_record(next_p->flags())) {
+        assert(next_p->is_size_record_of_arena(mptr), "arena records do not match");
+        size = next_p->size();
+        _arena_data[index].inc(size);
+        used_arena_size += size;
+        mItr.next();
+      }
+    }
+    mptr = (MemPointerRecordEx*)mItr.next();
+  }
+
+  // subtract the used arena size to get the size of arena chunks in the free list
+  index = flag2index(mtChunk);
+  _malloc_data[index].reduce(used_arena_size);
+  // we really don't know how many chunks are in the free list, so just
+  // set the counter to 0
+  _malloc_data[index].overwrite_counter(0);
+
+  return true;
+}
+
+// baseline mmap'd memory records, generate overall summary and summaries by
+// memory types
+bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
+  MemPointerArrayIteratorImpl vItr((MemPointerArray*)vm_records);
+  VMMemRegion* vptr = (VMMemRegion*)vItr.current();
+  int index;
+  while (vptr != NULL) {
+    index = flag2index(FLAGS_TO_MEMORY_TYPE(vptr->flags()));
+
+    // we use the number of thread stacks to count threads
+    if (IS_MEMORY_TYPE(vptr->flags(), mtThreadStack)) {
+      _number_of_threads ++;
+    }
+    _total_vm_reserved += vptr->reserved_size();
+    _total_vm_committed += vptr->committed_size();
+    _vm_data[index].inc(vptr->reserved_size(), vptr->committed_size());
+    vptr = (VMMemRegion*)vItr.next();
+  }
+  return true;
+}
+
+// baseline malloc'd memory by callsite; only callsites with more than 1KB of
+// allocated memory are stored.
+bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
+  assert(MemTracker::track_callsite(), "detail tracking is off");
+
+  MemPointerArrayIteratorImpl mItr((MemPointerArray*)malloc_records);
+  MemPointerRecordEx* mptr = (MemPointerRecordEx*)mItr.current();
+  MallocCallsitePointer mp;
+
+  if (_malloc_cs == NULL) {
+    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
+    // out of native memory
+    if (_malloc_cs == NULL) {
+      return false;
+    }
+  } else {
+    _malloc_cs->clear();
+  }
+
+  // only baseline callsites whose accumulated total exceeds 1 KB
+  while (mptr != NULL) {
+    if (!MemPointerRecord::is_arena_size_record(mptr->flags())) {
+      // skip thread stacks
+      if (!IS_MEMORY_TYPE(mptr->flags(), mtThreadStack)) {
+        if (mp.addr() != mptr->pc()) {
+          if ((mp.amount()/K) > 0) {
+            if (!_malloc_cs->append(&mp)) {
+              return false;
+            }
+          }
+          mp = MallocCallsitePointer(mptr->pc());
+        }
+        mp.inc(mptr->size());
+      }
+    }
+    mptr = (MemPointerRecordEx*)mItr.next();
+  }
+
+  if (mp.addr() != 0 && (mp.amount()/K) > 0) {
+    if (!_malloc_cs->append(&mp)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// baseline mmap'd memory by callsites
+bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
+  assert(MemTracker::track_callsite(), "detail tracking is off");
+
+  VMCallsitePointer vp;
+  MemPointerArrayIteratorImpl vItr((MemPointerArray*)vm_records);
+  VMMemRegionEx* vptr = (VMMemRegionEx*)vItr.current();
+
+  if (_vm_cs == NULL) {
+    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
+    if (_vm_cs == NULL) {
+      return false;
+    }
+  } else {
+    _vm_cs->clear();
+  }
+
+  while (vptr != NULL) {
+    if (vp.addr() != vptr->pc()) {
+      if (!_vm_cs->append(&vp)) {
+        return false;
+      }
+      vp = VMCallsitePointer(vptr->pc());
+    }
+    vp.inc(vptr->size(), vptr->committed_size());
+    vptr = (VMMemRegionEx*)vItr.next();
+  }
+  if (vp.addr() != 0) {
+    if (!_vm_cs->append(&vp)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// baseline a snapshot. If summary_only = false, memory usages aggregated by
+// callsites are also baselined.
+bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
+  MutexLockerEx snapshot_locker(snapshot._lock, true);
+  reset();
+  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
+               baseline_vm_summary(snapshot._vm_ptrs);
+  _number_of_classes = SystemDictionary::number_of_classes();
+
+  if (!summary_only && MemTracker::track_callsite() && _baselined) {
+    ((MemPointerArray*)snapshot._alloc_ptrs)->sort((FN_SORT)malloc_sort_by_pc);
+    ((MemPointerArray*)snapshot._vm_ptrs)->sort((FN_SORT)vm_sort_by_pc);
+    _baselined =  baseline_malloc_details(snapshot._alloc_ptrs) &&
+      baseline_vm_details(snapshot._vm_ptrs);
+    ((MemPointerArray*)snapshot._alloc_ptrs)->sort((FN_SORT)malloc_sort_by_addr);
+    ((MemPointerArray*)snapshot._vm_ptrs)->sort((FN_SORT)vm_sort_by_addr);
+  }
+  return _baselined;
+}
+
+
+int MemBaseline::flag2index(MEMFLAGS flag) const {
+  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
+    if (MemType2NameMap[index]._flag == flag) {
+      return index;
+    }
+  }
+  assert(false, "no type");
+  return -1;
+}
+
+const char* MemBaseline::type2name(MEMFLAGS type) {
+  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
+    if (MemType2NameMap[index]._flag == type) {
+      return MemType2NameMap[index]._name;
+    }
+  }
+  assert(false, "no type");
+  return NULL;
+}
+
+
+MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
+  _total_malloced = other._total_malloced;
+  _total_vm_reserved = other._total_vm_reserved;
+  _total_vm_committed = other._total_vm_committed;
+
+  _baselined = other._baselined;
+  _number_of_classes = other._number_of_classes;
+
+  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
+    _malloc_data[index] = other._malloc_data[index];
+    _vm_data[index] = other._vm_data[index];
+    _arena_data[index] = other._arena_data[index];
+  }
+
+  if (MemTracker::track_callsite()) {
+    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
+    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
+           "not properly baselined");
+    _malloc_cs->clear();
+    _vm_cs->clear();
+    int index;
+    for (index = 0; index < other._malloc_cs->length(); index ++) {
+      _malloc_cs->append(other._malloc_cs->at(index));
+    }
+
+    for (index = 0; index < other._vm_cs->length(); index ++) {
+      _vm_cs->append(other._vm_cs->at(index));
+    }
+  }
+  return *this;
+}
+
+/* compare functions for sorting */
+
+// sort snapshot malloc'd records in callsite pc order
+int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
+  assert(MemTracker::track_callsite(),"Just check");
+  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
+  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
+  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
+}
+
+// sort baselined malloc'd records in size order
+int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
+  assert(MemTracker::is_on(), "Just check");
+  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
+  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
+  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
+}
+
+// sort baselined malloc'd records in callsite pc order
+int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
+  assert(MemTracker::is_on(), "Just check");
+  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
+  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
+  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
+}
+
+// sort snapshot mmap'd records in callsite pc order
+int MemBaseline::vm_sort_by_pc(const void* p1, const void* p2) {
+  assert(MemTracker::track_callsite(),"Just check");
+  const VMMemRegionEx* mp1 = (const VMMemRegionEx*)p1;
+  const VMMemRegionEx* mp2 = (const VMMemRegionEx*)p2;
+  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
+}
+
+// sort baselined mmap'd records in size (reserved size) order
+int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
+  assert(MemTracker::is_on(), "Just check");
+  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
+  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
+  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
+}
+
+// sort baselined mmap'd records in callsite pc order
+int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
+  assert(MemTracker::is_on(), "Just check");
+  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
+  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
+  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
+}
+
+
+// sort snapshot malloc'd records in memory block address order
+int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
+  assert(MemTracker::is_on(), "Just check");
+  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
+  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
+  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
+  assert(delta != 0, "dup pointer");
+  return delta;
+}
+
+// sort snapshot mmap'd records in memory block address order
+int MemBaseline::vm_sort_by_addr(const void* p1, const void* p2) {
+  assert(MemTracker::is_on(), "Just check");
+  const VMMemRegion* mp1 = (const VMMemRegion*)p1;
+  const VMMemRegion* mp2 = (const VMMemRegion*)p2;
+  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
+  assert(delta != 0, "dup pointer");
+  return delta;
+}
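
The comparators above use UNSIGNED_COMPARE instead of plain subtraction, so large unsigned values (addresses, sizes) cannot produce a signed-overflow result with the wrong sign. A compilable sketch of the same comparator style used with qsort, assuming a simplified callsite record:

    #include <cstdio>
    #include <cstdlib>
    #include <cstdint>

    #define UNSIGNED_COMPARE(a, b)  ((a > b) ? 1 : ((a == b) ? 0 : -1))

    struct Callsite {
      uintptr_t pc;      // callsite program counter
      size_t    amount;  // bytes attributed to the callsite
    };

    // analogous to MemBaseline::bl_malloc_sort_by_size (descending by amount)
    static int sort_by_size(const void* p1, const void* p2) {
      const Callsite* c1 = (const Callsite*)p1;
      const Callsite* c2 = (const Callsite*)p2;
      return UNSIGNED_COMPARE(c2->amount, c1->amount);
    }

    int main() {
      Callsite cs[3] = { {0x1000, 64}, {0x2000, 4096}, {0x3000, 512} };
      qsort(cs, 3, sizeof(Callsite), sort_by_size);
      for (int i = 0; i < 3; i++) {
        std::printf("pc=0x%lx amount=%zu\n", (unsigned long)cs[i].pc, cs[i].amount);
      }
      return 0;
    }
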
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memBaseline.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MEM_BASELINE_HPP
+#define SHARE_VM_SERVICES_MEM_BASELINE_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/mutex.hpp"
+#include "services/memPtr.hpp"
+#include "services/memSnapshot.hpp"
+
+// compare unsigned number
+#define UNSIGNED_COMPARE(a, b)  ((a > b) ? 1 : ((a == b) ? 0 : -1))
+
+/*
+ * MallocCallsitePointer and VMCallsitePointer are used
+ * to baseline memory blocks with their callsite information.
+ * They are only available when detail tracking is turned
+ * on.
+ */
+
+/* baselined malloc record aggregated by callsite */
+class MallocCallsitePointer : public MemPointer {
+ private:
+  size_t    _count;   // number of malloc invocation from this callsite
+  size_t    _amount;  // total amount of memory malloc-ed from this callsite
+
+ public:
+  MallocCallsitePointer() {
+    _count = 0;
+    _amount = 0;
+  }
+
+  MallocCallsitePointer(address pc) : MemPointer(pc) {
+    _count = 0;
+    _amount = 0;
+  }
+
+  MallocCallsitePointer& operator=(const MallocCallsitePointer& p) {
+    MemPointer::operator=(p);
+    _count = p.count();
+    _amount = p.amount();
+    return *this;
+  }
+
+  inline void inc(size_t size) {
+    _count ++;
+    _amount += size;
+  };
+
+  inline size_t count() const {
+    return _count;
+  }
+
+  inline size_t amount() const {
+    return _amount;
+  }
+};
+
+// baselined virtual memory record aggregated by callsite
+class VMCallsitePointer : public MemPointer {
+ private:
+  size_t     _count;              // number of invocation from this callsite
+  size_t     _reserved_amount;    // total reserved amount
+  size_t     _committed_amount;   // total committed amount
+
+ public:
+  VMCallsitePointer() {
+    _count = 0;
+    _reserved_amount = 0;
+    _committed_amount = 0;
+  }
+
+  VMCallsitePointer(address pc) : MemPointer(pc) {
+    _count = 0;
+    _reserved_amount = 0;
+    _committed_amount = 0;
+  }
+
+  VMCallsitePointer& operator=(const VMCallsitePointer& p) {
+    MemPointer::operator=(p);
+    _count = p.count();
+    _reserved_amount = p.reserved_amount();
+    _committed_amount = p.committed_amount();
+    return *this;
+  }
+
+  inline void inc(size_t reserved, size_t committed) {
+    _count ++;
+    _reserved_amount += reserved;
+    _committed_amount += committed;
+  }
+
+  inline size_t count() const {
+    return _count;
+  }
+
+  inline size_t reserved_amount() const {
+    return _reserved_amount;
+  }
+
+  inline size_t committed_amount() const {
+    return _committed_amount;
+  }
+};
+
+// maps a memory type flag to readable name
+typedef struct _memType2Name {
+  MEMFLAGS     _flag;
+  const char*  _name;
+} MemType2Name;
+
+
+// This class aggregates malloc'd records by memory type
+class MallocMem : public _ValueObj {
+ private:
+  MEMFLAGS       _type;
+
+  size_t         _count;
+  size_t         _amount;
+
+ public:
+  MallocMem() {
+    _type = mtNone;
+    _count = 0;
+    _amount = 0;
+  }
+
+  MallocMem(MEMFLAGS flags) {
+    assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
+    _type = FLAGS_TO_MEMORY_TYPE(flags);
+    _count = 0;
+    _amount = 0;
+  }
+
+  inline void set_type(MEMFLAGS flag) {
+    _type = flag;
+  }
+
+  inline void clear() {
+    _count = 0;
+    _amount = 0;
+    _type = mtNone;
+  }
+
+  MallocMem& operator=(const MallocMem& m) {
+    assert(_type == m.type(), "different type");
+    _count = m.count();
+    _amount = m.amount();
+    return *this;
+  }
+
+  inline void inc(size_t amt) {
+    _amount += amt;
+    _count ++;
+  }
+
+  inline void reduce(size_t amt) {
+    assert(_amount >= amt, "Just check");
+    _amount -= amt;
+  }
+
+  inline void overwrite_counter(size_t count) {
+    _count = count;
+  }
+
+  inline MEMFLAGS type() const {
+    return _type;
+  }
+
+  inline bool is_type(MEMFLAGS flags) const {
+    return FLAGS_TO_MEMORY_TYPE(flags) == _type;
+  }
+
+  inline size_t count() const {
+    return _count;
+  }
+
+  inline size_t amount() const {
+    return _amount;
+  }
+};
+
+// This class records live arena's memory usage
+class ArenaMem : public MallocMem {
+ public:
+  ArenaMem(MEMFLAGS typeflag): MallocMem(typeflag) {
+  }
+  ArenaMem() { }
+};
+
+// This class aggregates virtual memory by its memory type
+class VMMem : public _ValueObj {
+ private:
+  MEMFLAGS       _type;
+
+  size_t         _count;
+  size_t         _reserved_amount;
+  size_t         _committed_amount;
+
+ public:
+  VMMem() {
+    _type = mtNone;
+    _count = 0;
+    _reserved_amount = 0;
+    _committed_amount = 0;
+  }
+
+  VMMem(MEMFLAGS flags) {
+    assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
+    _type = FLAGS_TO_MEMORY_TYPE(flags);
+    _count = 0;
+    _reserved_amount = 0;
+    _committed_amount = 0;
+  }
+
+  inline void clear() {
+    _type = mtNone;
+    _count = 0;
+    _reserved_amount = 0;
+    _committed_amount = 0;
+  }
+
+  inline void set_type(MEMFLAGS flags) {
+    _type = FLAGS_TO_MEMORY_TYPE(flags);
+  }
+
+  VMMem& operator=(const VMMem& m) {
+    assert(_type == m.type(), "different type");
+
+    _count = m.count();
+    _reserved_amount = m.reserved_amount();
+    _committed_amount = m.committed_amount();
+    return *this;
+  }
+
+
+  inline MEMFLAGS type() const {
+    return _type;
+  }
+
+  inline bool is_type(MEMFLAGS flags) const {
+    return FLAGS_TO_MEMORY_TYPE(flags) == _type;
+  }
+
+  inline void inc(size_t reserved_amt, size_t committed_amt) {
+    _reserved_amount += reserved_amt;
+    _committed_amount += committed_amt;
+    _count ++;
+  }
+
+  inline size_t count() const {
+    return _count;
+  }
+
+  inline size_t reserved_amount() const {
+    return _reserved_amount;
+  }
+
+  inline size_t committed_amount() const {
+    return _committed_amount;
+  }
+};
+
+
+
+#define NUMBER_OF_MEMORY_TYPE    (mt_number_of_types + 1)
+
+class BaselineReporter;
+class BaselineComparisonReporter;
+
+/*
+ * This class baselines the current memory snapshot.
+ * A memory baseline summarizes memory usage by memory type,
+ * and aggregates memory usage by callsite when detail
+ * tracking is on.
+ */
+class MemBaseline : public _ValueObj {
+  friend class BaselineReporter;
+  friend class BaselineComparisonReporter;
+
+ private:
+  // overall summaries
+  size_t        _total_malloced;
+  size_t        _total_vm_reserved;
+  size_t        _total_vm_committed;
+  size_t        _number_of_classes;
+  size_t        _number_of_threads;
+
+  // whether the snapshot has been properly baselined
+  bool          _baselined;
+
+  // we categorize memory into three categories within each memory type
+  MallocMem     _malloc_data[NUMBER_OF_MEMORY_TYPE];
+  VMMem         _vm_data[NUMBER_OF_MEMORY_TYPE];
+  ArenaMem      _arena_data[NUMBER_OF_MEMORY_TYPE];
+
+  // memory records that aggregate memory usage by callsites.
+  // only available when detail tracking is on.
+  MemPointerArray*  _malloc_cs;
+  MemPointerArray*  _vm_cs;
+
+ private:
+  static MemType2Name  MemType2NameMap[NUMBER_OF_MEMORY_TYPE];
+
+ private:
+  // should not use copy constructor
+  MemBaseline(MemBaseline& copy) { ShouldNotReachHere(); }
+
+ public:
+  // create a memory baseline
+  MemBaseline();
+
+  virtual ~MemBaseline();
+
+  inline bool baselined() const {
+    return _baselined;
+  }
+
+  MemBaseline& operator=(const MemBaseline& other);
+
+  // reset the baseline for reuse
+  void clear();
+
+  // baseline the snapshot
+  bool baseline(MemSnapshot& snapshot, bool summary_only = true);
+
+  bool baseline(const MemPointerArray* malloc_records,
+                const MemPointerArray* vm_records,
+                bool summary_only = true);
+
+  // total malloc'd memory of specified memory type
+  inline size_t malloc_amount(MEMFLAGS flag) const {
+    return _malloc_data[flag2index(flag)].amount();
+  }
+  // number of malloc'd memory blocks of specified memory type
+  inline size_t malloc_count(MEMFLAGS flag) const {
+    return _malloc_data[flag2index(flag)].count();
+  }
+  // total memory used by arenas of specified memory type
+  inline size_t arena_amount(MEMFLAGS flag) const {
+    return _arena_data[flag2index(flag)].amount();
+  }
+  // number of arenas of specified memory type
+  inline size_t arena_count(MEMFLAGS flag) const {
+    return _arena_data[flag2index(flag)].count();
+  }
+  // total reserved memory of specified memory type
+  inline size_t reserved_amount(MEMFLAGS flag) const {
+    return _vm_data[flag2index(flag)].reserved_amount();
+  }
+  // total committed memory of specified memory type
+  inline size_t committed_amount(MEMFLAGS flag) const {
+    return _vm_data[flag2index(flag)].committed_amount();
+  }
+  // total memory (malloc'd + mmap'd + arena) of specified
+  // memory type
+  inline size_t total_amount(MEMFLAGS flag) const {
+    int index = flag2index(flag);
+    return _malloc_data[index].amount() +
+           _vm_data[index].reserved_amount() +
+           _arena_data[index].amount();
+  }
+
+  /* overall summaries */
+
+  // total malloc'd memory in snapshot
+  inline size_t total_malloc_amount() const {
+    return _total_malloced;
+  }
+  // total mmap'd memory in snapshot
+  inline size_t total_reserved_amount() const {
+    return _total_vm_reserved;
+  }
+  // total committed memory in snapshot
+  inline size_t total_committed_amount() const {
+    return _total_vm_committed;
+  }
+  // number of loaded classes
+  inline size_t number_of_classes() const {
+    return _number_of_classes;
+  }
+  // number of running threads
+  inline size_t number_of_threads() const {
+    return _number_of_threads;
+  }
+  // lookup human readable name of a memory type
+  static const char* type2name(MEMFLAGS type);
+
+ private:
+  // convert a memory flag to an index into the mapping table
+  int         flag2index(MEMFLAGS flag) const;
+
+  // reset baseline values
+  void reset();
+
+  // summarize the records in global snapshot
+  bool baseline_malloc_summary(const MemPointerArray* malloc_records);
+  bool baseline_vm_summary(const MemPointerArray* vm_records);
+  bool baseline_malloc_details(const MemPointerArray* malloc_records);
+  bool baseline_vm_details(const MemPointerArray* vm_records);
+
+  // print a line of malloc'd memory aggregated by callsite
+  void print_malloc_callsite(outputStream* st, address pc, size_t size,
+    size_t count, int diff_amt, int diff_count) const;
+  // print a line of mmap'd memory aggregated by callsite
+  void print_vm_callsite(outputStream* st, address pc, size_t rsz,
+    size_t csz, int diff_rsz, int diff_csz) const;
+
+  // sorting functions for raw records
+  static int malloc_sort_by_pc(const void* p1, const void* p2);
+  static int malloc_sort_by_addr(const void* p1, const void* p2);
+
+  static int vm_sort_by_pc(const void* p1, const void* p2);
+  static int vm_sort_by_addr(const void* p1, const void* p2);
+
+ private:
+  // sorting functions for baselined records
+  static int bl_malloc_sort_by_size(const void* p1, const void* p2);
+  static int bl_vm_sort_by_size(const void* p1, const void* p2);
+  static int bl_malloc_sort_by_pc(const void* p1, const void* p2);
+  static int bl_vm_sort_by_pc(const void* p1, const void* p2);
+};
+
+
+#endif // SHARE_VM_SERVICES_MEM_BASELINE_HPP
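
The header above splits each memory type into three buckets: malloc'd bytes, reserved/committed virtual memory, and arena usage; total_amount() is simply their sum. A simplified, self-contained sketch of that aggregation model (not the actual NMT structures):

    #include <cstdio>
    #include <cstddef>

    enum MemType { TypeJavaHeap, TypeClass, TypeThread, TypeCount };

    struct TypeSummary {
      size_t malloc_amount;    // malloc'd bytes
      size_t vm_reserved;      // mmap'd reserved bytes
      size_t arena_amount;     // bytes held by arenas
    };

    static size_t total_amount(const TypeSummary& s) {
      // mirrors MemBaseline::total_amount(): malloc'd + reserved + arena
      return s.malloc_amount + s.vm_reserved + s.arena_amount;
    }

    int main() {
      TypeSummary summaries[TypeCount] = {};
      summaries[TypeClass].malloc_amount = 128 * 1024;
      summaries[TypeClass].vm_reserved   = 4 * 1024 * 1024;
      summaries[TypeClass].arena_amount  = 32 * 1024;
      std::printf("class total: %zu bytes\n", total_amount(summaries[TypeClass]));
      return 0;
    }
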
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memPtr.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "services/memPtr.hpp"
+#include "services/memTracker.hpp"
+
+volatile jint SequenceGenerator::_seq_number = 1;
+DEBUG_ONLY(jint SequenceGenerator::_max_seq_number = 1;)
+DEBUG_ONLY(volatile unsigned long SequenceGenerator::_generation = 0;)
+
+jint SequenceGenerator::next() {
+  jint seq = Atomic::add(1, &_seq_number);
+  if (seq < 0) {
+    MemTracker::shutdown(MemTracker::NMT_sequence_overflow);
+  }
+  assert(seq > 0, "counter overflow");
+  DEBUG_ONLY(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
+  return seq;
+}
+
+
+
+bool VMMemRegion::contains(const VMMemRegion* mr) const {
+  assert(base() != 0, "no base address");
+  assert(size() != 0 || committed_size() != 0,
+    "no range");
+  address base_addr = base();
+  address end_addr = base_addr +
+    (is_reserve_record()? reserved_size(): committed_size());
+  if (mr->is_reserve_record()) {
+    if (mr->base() == base_addr && mr->size() == size()) {
+      // the same range
+      return true;
+    }
+    return false;
+  } else if (mr->is_commit_record() || mr->is_uncommit_record()) {
+    assert(mr->base() != 0 && mr->committed_size() > 0,
+      "bad record");
+    return (mr->base() >= base_addr &&
+      (mr->base() + mr->committed_size()) <= end_addr);
+  } else if (mr->is_type_tagging_record()) {
+    assert(mr->base() != 0, "no base");
+    return mr->base() == base_addr;
+  } else if (mr->is_release_record()) {
+    assert(mr->base() != 0 && mr->size() > 0,
+      "bad record");
+    return (mr->base() == base_addr && mr->size() == size());
+  } else {
+    assert(false, "what happened?");
+    return false;
+  }
+}
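
SequenceGenerator hands out monotonically increasing sequence numbers via an atomic increment and treats a wrap into negative values as a fatal overflow, which is what serializes the per-thread memory records into a global order. A minimal sketch of the same idea using std::atomic instead of HotSpot's Atomic class:

    #include <atomic>
    #include <cstdio>

    class SeqGen {
     public:
      // returns a strictly increasing positive sequence number; a negative
      // result would signal 32-bit overflow, which the tracker treats as fatal
      static int next() {
        int seq = ++_seq;
        if (seq < 0) {
          std::fprintf(stderr, "sequence number overflow\n");
        }
        return seq;
      }
     private:
      static std::atomic<int> _seq;
    };

    std::atomic<int> SeqGen::_seq(0);

    int main() {
      for (int i = 0; i < 3; i++) {
        std::printf("seq = %d\n", SeqGen::next());
      }
      return 0;
    }
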
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memPtr.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MEM_PTR_HPP
+#define SHARE_VM_SERVICES_MEM_PTR_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
+
+/*
+ * global sequence generator that generates sequence numbers to serialize
+ * memory records.
+ */
+class SequenceGenerator : AllStatic {
+ public:
+  static jint next();
+
+  // peek last sequence number
+  static jint peek() {
+    return _seq_number;
+  }
+
+  // reset sequence number
+  static void reset() {
+    assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
+    _seq_number = 1;
+    DEBUG_ONLY(_generation ++;)
+  };
+
+  DEBUG_ONLY(static unsigned long current_generation() { return (unsigned long)_generation; })
+  DEBUG_ONLY(static jint max_seq_num() { return _max_seq_number; })
+
+ private:
+  static volatile jint _seq_number;
+  DEBUG_ONLY(static jint _max_seq_number; )
+  DEBUG_ONLY(static volatile unsigned long _generation; )
+};
+
+/*
+ * The following classes are used to hold memory activity records in different stages.
+ *   MemPointer
+ *     |--------MemPointerRecord
+ *                     |
+ *                     |----MemPointerRecordEx
+ *                     |           |
+ *                     |           |-------SeqMemPointerRecordEx
+ *                     |
+ *                     |----SeqMemPointerRecord
+ *                     |
+ *                     |----VMMemRegion
+ *                               |
+ *                               |-----VMMemRegionEx
+ *
+ *
+ *  prefix 'Seq' - sequenced, the record contains a sequence number
+ *  suffix 'Ex'  - extension, the record contains the caller's pc
+ *
+ *  per-thread recorder : SeqMemPointerRecord(Ex)
+ *  snapshot staging    : SeqMemPointerRecord(Ex)
+ *  snapshot            : MemPointerRecord(Ex) and VMMemRegion(Ex)
+ *
+ */
+
+/*
+ * A class that wraps the address of a memory block;
+ * the pointer refers either to a malloc'd memory
+ * block or to an mmap'd memory block.
+ */
+class MemPointer : public _ValueObj {
+ public:
+  MemPointer(): _addr(0) { }
+  MemPointer(address addr): _addr(addr) { }
+
+  MemPointer(const MemPointer& copy_from) {
+    _addr = copy_from.addr();
+  }
+
+  inline address addr() const {
+    return _addr;
+  }
+
+  inline operator address() const {
+    return addr();
+  }
+
+  inline bool operator == (const MemPointer& other) const {
+    return addr() == other.addr();
+  }
+
+  inline MemPointer& operator = (const MemPointer& other) {
+    _addr = other.addr();
+    return *this;
+  }
+
+ protected:
+  inline void set_addr(address addr) { _addr = addr; }
+
+ protected:
+  // memory address
+  address    _addr;
+};
+
+/* MemPointerRecord records an activity and the associated
+ * attributes on a memory block.
+ */
+class MemPointerRecord : public MemPointer {
+ private:
+  MEMFLAGS       _flags;
+  size_t         _size;
+
+public:
+  /* extension of MemoryType enum
+   * see share/vm/memory/allocation.hpp for details.
+   *
+   * The tag values are associated with sorting order, so be
+   * careful if changes are needed.
+   * Allocation records should be sorted ahead of tagging
+   * records, which in turn come ahead of deallocation records.
+   */
+  enum MemPointerTags {
+    tag_alloc            = 0x0001, // malloc or reserve record
+    tag_commit           = 0x0002, // commit record
+    tag_type             = 0x0003, // tag virtual memory to a memory type
+    tag_uncommit         = 0x0004, // uncommit record
+    tag_release          = 0x0005, // free or release record
+    tag_size             = 0x0006, // arena size
+    tag_masks            = 0x0007, // all tag bits
+    vmBit                = 0x0008
+  };
+
+  /* helper functions to interpret the tagging flags */
+
+  inline static bool is_allocation_record(MEMFLAGS flags) {
+    return (flags & tag_masks) == tag_alloc;
+  }
+
+  inline static bool is_deallocation_record(MEMFLAGS flags) {
+    return (flags & tag_masks) == tag_release;
+  }
+
+  inline static bool is_arena_record(MEMFLAGS flags) {
+    return (flags & (otArena | tag_size)) == otArena;
+  }
+
+  inline static bool is_arena_size_record(MEMFLAGS flags) {
+    return (flags & (otArena | tag_size)) == (otArena | tag_size);
+  }
+
+  inline static bool is_virtual_memory_record(MEMFLAGS flags) {
+    return (flags & vmBit) != 0;
+  }
+
+  inline static bool is_virtual_memory_reserve_record(MEMFLAGS flags) {
+    return (flags & 0x0F) == (tag_alloc | vmBit);
+  }
+
+  inline static bool is_virtual_memory_commit_record(MEMFLAGS flags) {
+    return (flags & 0x0F) == (tag_commit | vmBit);
+  }
+
+  inline static bool is_virtual_memory_uncommit_record(MEMFLAGS flags) {
+    return (flags & 0x0F) == (tag_uncommit | vmBit);
+  }
+
+  inline static bool is_virtual_memory_release_record(MEMFLAGS flags) {
+    return (flags & 0x0F) == (tag_release | vmBit);
+  }
+
+  inline static bool is_virtual_memory_type_record(MEMFLAGS flags) {
+    return (flags & 0x0F) == (tag_type | vmBit);
+  }
+
+  /* tagging flags */
+  inline static MEMFLAGS malloc_tag()                 { return tag_alloc;   }
+  inline static MEMFLAGS free_tag()                   { return tag_release; }
+  inline static MEMFLAGS arena_size_tag()             { return tag_size | otArena; }
+  inline static MEMFLAGS virtual_memory_tag()         { return vmBit; }
+  inline static MEMFLAGS virtual_memory_reserve_tag() { return (tag_alloc | vmBit); }
+  inline static MEMFLAGS virtual_memory_commit_tag()  { return (tag_commit | vmBit); }
+  inline static MEMFLAGS virtual_memory_uncommit_tag(){ return (tag_uncommit | vmBit); }
+  inline static MEMFLAGS virtual_memory_release_tag() { return (tag_release | vmBit); }
+  inline static MEMFLAGS virtual_memory_type_tag()    { return (tag_type | vmBit); }
+
+ public:
+  MemPointerRecord(): _flags(mtNone), _size(0) { }
+
+  MemPointerRecord(address addr, MEMFLAGS memflags, size_t size = 0):
+      MemPointer(addr), _flags(memflags), _size(size) { }
+
+  MemPointerRecord(const MemPointerRecord& copy_from):
+    MemPointer(copy_from), _flags(copy_from.flags()),
+    _size(copy_from.size()) {
+  }
+
+  /* MemPointerRecord is not sequenced; it always returns
+   * 0 to indicate non-sequenced
+   */
+  virtual jint seq() const               { return 0; }
+
+  inline size_t   size()  const          { return _size; }
+  inline void set_size(size_t size)      { _size = size; }
+
+  inline MEMFLAGS flags() const          { return _flags; }
+  inline void set_flags(MEMFLAGS flags)  { _flags = flags; }
+
+  MemPointerRecord& operator= (const MemPointerRecord& ptr) {
+    MemPointer::operator=(ptr);
+    _flags = ptr.flags();
+#ifdef ASSERT
+    if (IS_ARENA_OBJ(_flags)) {
+      assert(!is_vm_pointer(), "wrong flags");
+      assert((_flags & ot_masks) == otArena, "wrong flags");
+    }
+#endif
+    _size = ptr.size();
+    return *this;
+  }
+
+  // if the pointer represents a malloc-ed memory address
+  inline bool is_malloced_pointer() const {
+    return !is_vm_pointer();
+  }
+
+  // if the pointer represents a virtual memory address
+  inline bool is_vm_pointer() const {
+    return is_virtual_memory_record(_flags);
+  }
+
+  // if this record records a 'malloc' or virtual memory
+  // 'reserve' call
+  inline bool is_allocation_record() const {
+    return is_allocation_record(_flags);
+  }
+
+  // if this record records a size information of an arena
+  inline bool is_arena_size_record() const {
+    return is_arena_size_record(_flags);
+  }
+
+  // if this pointer represents an address to an arena object
+  inline bool is_arena_record() const {
+    return is_arena_record(_flags);
+  }
+
+  // if this record represents a size information of specific arena
+  inline bool is_size_record_of_arena(const MemPointerRecord* arena_rc) {
+    assert(is_arena_size_record(), "not size record");
+    assert(arena_rc->is_arena_record(), "not arena record");
+    return (arena_rc->addr() + sizeof(void*)) == addr();
+  }
+
+  // if this record records a 'free' or virtual memory 'free' call
+  inline bool is_deallocation_record() const {
+    return is_deallocation_record(_flags);
+  }
+
+  // if this record records a virtual memory 'commit' call
+  inline bool is_commit_record() const {
+    return is_virtual_memory_commit_record(_flags);
+  }
+
+  // if this record records a virtual memory 'uncommit' call
+  inline bool is_uncommit_record() const {
+    return is_virtual_memory_uncommit_record(_flags);
+  }
+
+  // if this record is a tagging record of a virtual memory block
+  inline bool is_type_tagging_record() const {
+    return is_virtual_memory_type_record(_flags);
+  }
+};
+
+// MemPointerRecordEx also records callsite pc, from where
+// the memory block is allocated
+class MemPointerRecordEx : public MemPointerRecord {
+ private:
+  address      _pc;  // callsite pc
+
+ public:
+  MemPointerRecordEx(): _pc(0) { }
+
+  MemPointerRecordEx(address addr, MEMFLAGS memflags, size_t size = 0, address pc = 0):
+    MemPointerRecord(addr, memflags, size), _pc(pc) {}
+
+  MemPointerRecordEx(const MemPointerRecordEx& copy_from):
+    MemPointerRecord(copy_from), _pc(copy_from.pc()) {}
+
+  inline address pc() const { return _pc; }
+
+  void init(const MemPointerRecordEx* mpe) {
+    MemPointerRecord::operator=(*mpe);
+    _pc = mpe->pc();
+  }
+
+  void init(const MemPointerRecord* mp) {
+    MemPointerRecord::operator=(*mp);
+    _pc = 0;
+  }
+};
+
+// a virtual memory region
+class VMMemRegion : public MemPointerRecord {
+ private:
+  // committed size
+  size_t       _committed_size;
+
+public:
+  VMMemRegion(): _committed_size(0) { }
+
+  void init(const MemPointerRecord* mp) {
+    assert(mp->is_vm_pointer(), "not virtual memory pointer");
+    _addr = mp->addr();
+    if (mp->is_commit_record() || mp->is_uncommit_record()) {
+      _committed_size = mp->size();
+      set_size(_committed_size);
+    } else {
+      set_size(mp->size());
+      _committed_size = 0;
+    }
+    set_flags(mp->flags());
+  }
+
+  VMMemRegion& operator=(const VMMemRegion& other) {
+    MemPointerRecord::operator=(other);
+    _committed_size = other.committed_size();
+    return *this;
+  }
+
+  inline bool is_reserve_record() const {
+    return is_virtual_memory_reserve_record(flags());
+  }
+
+  inline bool is_release_record() const {
+    return is_virtual_memory_release_record(flags());
+  }
+
+  // resize reserved VM range
+  inline void set_reserved_size(size_t new_size) {
+    assert(new_size >= committed_size(), "resize");
+    set_size(new_size);
+  }
+
+  inline void commit(size_t size) {
+    _committed_size += size;
+  }
+
+  inline void uncommit(size_t size) {
+    if (_committed_size >= size) {
+      _committed_size -= size;
+    } else {
+      _committed_size = 0;
+    }
+  }
+
+  /*
+   * if this virtual memory range covers whole range of
+   * the other VMMemRegion
+   */
+  bool contains(const VMMemRegion* mr) const;
+
+  /* base address of this virtual memory range */
+  inline address base() const {
+    return addr();
+  }
+
+  /* tag this virtual memory range to the specified memory type */
+  inline void tag(MEMFLAGS f) {
+    set_flags(flags() | (f & mt_masks));
+  }
+
+  // release part of memory range
+  inline void partial_release(address add, size_t sz) {
+    assert(add >= addr() && add < addr() + size(), "not valid address");
+    // for now, it can partially release from both ends,
+    // but not in the middle
+    assert(add == addr() || (add + sz) == (addr() + size()),
+      "release in the middle");
+    if (add == addr()) {
+      set_addr(add + sz);
+      set_size(size() - sz);
+    } else {
+      set_size(size() - sz);
+    }
+  }
+
+  // the committed size of the virtual memory block
+  inline size_t committed_size() const {
+    return _committed_size;
+  }
+
+  // the reserved size of the virtual memory block
+  inline size_t reserved_size() const {
+    return size();
+  }
+};
+
+class VMMemRegionEx : public VMMemRegion {
+ private:
+  jint   _seq;  // sequence number
+
+ public:
+  VMMemRegionEx(): _seq(0), _pc(0) { }
+
+  void init(const MemPointerRecordEx* mpe) {
+    VMMemRegion::init(mpe);
+    _pc = mpe->pc();
+  }
+
+  void init(const MemPointerRecord* mpe) {
+    VMMemRegion::init(mpe);
+    _pc = 0;
+  }
+
+  VMMemRegionEx& operator=(const VMMemRegionEx& other) {
+    VMMemRegion::operator=(other);
+    _pc = other.pc();
+    return *this;
+  }
+
+  inline address pc() const { return _pc; }
+ private:
+  address   _pc;
+};
+
+/*
+ * Sequenced memory record
+ */
+class SeqMemPointerRecord : public MemPointerRecord {
+ private:
+   jint _seq;  // sequence number
+
+ public:
+  SeqMemPointerRecord(): _seq(0){ }
+
+  SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size)
+    : MemPointerRecord(addr, flags, size) {
+    _seq = SequenceGenerator::next();
+  }
+
+  SeqMemPointerRecord(const SeqMemPointerRecord& copy_from)
+    : MemPointerRecord(copy_from) {
+    _seq = copy_from.seq();
+  }
+
+  SeqMemPointerRecord& operator= (const SeqMemPointerRecord& ptr) {
+    MemPointerRecord::operator=(ptr);
+    _seq = ptr.seq();
+    return *this;
+  }
+
+  inline jint seq() const {
+    return _seq;
+  }
+};
+
+
+
+class SeqMemPointerRecordEx : public MemPointerRecordEx {
+ private:
+  jint    _seq;  // sequence number
+
+ public:
+  SeqMemPointerRecordEx(): _seq(0) { }
+
+  SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size,
+    address pc): MemPointerRecordEx(addr, flags, size, pc) {
+    _seq = SequenceGenerator::next();
+  }
+
+  SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
+    : MemPointerRecordEx(copy_from) {
+    _seq = copy_from.seq();
+  }
+
+  SeqMemPointerRecordEx& operator= (const SeqMemPointerRecordEx& ptr) {
+    MemPointerRecordEx::operator=(ptr);
+    _seq = ptr.seq();
+    return *this;
+  }
+
+  inline jint seq() const {
+    return _seq;
+  }
+};
+
+#endif // SHARE_VM_SERVICES_MEM_PTR_HPP
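
MemPointerRecord encodes the operation in the low tag bits and marks virtual-memory records with a separate bit, so a record's kind can be tested with simple mask comparisons. A standalone sketch of those flag checks, reusing the tag values defined in the header above:

    #include <cstdio>

    enum RecordFlags {
      TAG_ALLOC    = 0x0001,   // malloc or reserve record
      TAG_COMMIT   = 0x0002,   // commit record
      TAG_RELEASE  = 0x0005,   // free or release record
      TAG_MASK     = 0x0007,   // all tag bits
      VM_BIT       = 0x0008    // marks a virtual memory record
    };

    static bool is_allocation(unsigned flags)  { return (flags & TAG_MASK) == TAG_ALLOC; }
    static bool is_vm_record(unsigned flags)   { return (flags & VM_BIT) != 0; }
    static bool is_vm_reserve(unsigned flags)  { return (flags & 0x0F) == (TAG_ALLOC | VM_BIT); }

    int main() {
      unsigned malloc_rec  = TAG_ALLOC;            // plain malloc
      unsigned reserve_rec = TAG_ALLOC | VM_BIT;   // virtual memory reserve
      std::printf("malloc:  alloc=%d vm=%d\n", is_allocation(malloc_rec), is_vm_record(malloc_rec));
      std::printf("reserve: alloc=%d reserve=%d\n", is_allocation(reserve_rec), is_vm_reserve(reserve_rec));
      return 0;
    }
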
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memPtrArray.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
+#define SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
+
+#include "memory/allocation.hpp"
+#include "services/memPtr.hpp"
+
+class MemPtr;
+class MemRecorder;
+class ArenaInfo;
+class MemSnapshot;
+
+extern "C" {
+  typedef int (*FN_SORT)(const void *, const void *);
+}
+
+
+// Memory pointer array interface. This array is used by NMT to hold
+// various memory block information.
+// The memory pointer arrays are usually walked with their iterators.
+
+class MemPointerArray : public CHeapObj<mtNMT> {
+ public:
+  virtual ~MemPointerArray() { }
+
+  // return true if it cannot allocate storage for the data
+  virtual bool out_of_memory() const = 0;
+  virtual bool is_empty() const = 0;
+  virtual bool is_full() = 0;
+  virtual int  length() const = 0;
+  virtual void clear() = 0;
+  virtual bool append(MemPointer* ptr) = 0;
+  virtual bool insert_at(MemPointer* ptr, int pos) = 0;
+  virtual bool remove_at(int pos) = 0;
+  virtual MemPointer* at(int index) const = 0;
+  virtual void sort(FN_SORT fn) = 0;
+  virtual size_t instance_size() const = 0;
+  virtual bool shrink() = 0;
+
+  debug_only(virtual int capacity() const = 0;)
+};
+
+// Iterator interface
+class MemPointerArrayIterator VALUE_OBJ_CLASS_SPEC {
+ public:
+  // return the pointer at current position
+  virtual MemPointer* current() const = 0;
+  // return the next pointer and advance current position
+  virtual MemPointer* next() = 0;
+  // return next pointer without advancing current position
+  virtual MemPointer* peek_next() const = 0;
+  // return previous pointer without changing current position
+  virtual MemPointer* peek_prev() const = 0;
+  // remove the pointer at current position
+  virtual void        remove() = 0;
+  // insert the pointer at current position
+  virtual bool        insert(MemPointer* ptr) = 0;
+  // insert specified element after current position and
+  // move current position to newly inserted position
+  virtual bool        insert_after(MemPointer* ptr) = 0;
+};
+
+// implementation class
+class MemPointerArrayIteratorImpl : public MemPointerArrayIterator {
+#ifdef ASSERT
+ protected:
+#else
+ private:
+#endif
+  MemPointerArray*  _array;
+  int               _pos;
+
+ public:
+  MemPointerArrayIteratorImpl(MemPointerArray* arr) {
+    assert(arr != NULL, "Parameter check");
+    _array = arr;
+    _pos = 0;
+  }
+
+  virtual MemPointer* current() const {
+    if (_pos < _array->length()) {
+      return _array->at(_pos);
+    }
+    return NULL;
+  }
+
+  virtual MemPointer* next() {
+    if (_pos + 1 < _array->length()) {
+      return _array->at(++_pos);
+    }
+    _pos = _array->length();
+    return NULL;
+  }
+
+  virtual MemPointer* peek_next() const {
+    if (_pos + 1 < _array->length()) {
+      return _array->at(_pos + 1);
+    }
+    return NULL;
+  }
+
+  virtual MemPointer* peek_prev() const {
+    if (_pos > 0) {
+      return _array->at(_pos - 1);
+    }
+    return NULL;
+  }
+
+  virtual void remove() {
+    if (_pos < _array->length()) {
+      _array->remove_at(_pos);
+    }
+  }
+
+  virtual bool insert(MemPointer* ptr) {
+    return _array->insert_at(ptr, _pos);
+  }
+
+  virtual bool insert_after(MemPointer* ptr) {
+    if (_array->insert_at(ptr, _pos + 1)) {
+      _pos ++;
+      return true;
+    }
+    return false;
+  }
+};
+
+
+
+// Memory pointer array implementation.
+// This implementation provides an expandable array
+#define DEFAULT_PTR_ARRAY_SIZE 1024
+
+template <class E> class MemPointerArrayImpl : public MemPointerArray {
+ private:
+  int                   _max_size;
+  int                   _size;
+  bool                  _init_elements;
+  E*                    _data;
+
+ public:
+  MemPointerArrayImpl(int initial_size = DEFAULT_PTR_ARRAY_SIZE, bool init_elements = true):
+   _max_size(initial_size), _size(0), _init_elements(init_elements) {
+    _data = (E*)raw_allocate(sizeof(E), initial_size);
+    if (_init_elements) {
+      for (int index = 0; index < _max_size; index ++) {
+        ::new ((void*)&_data[index]) E();
+      }
+    }
+  }
+
+  virtual ~MemPointerArrayImpl() {
+    if (_data != NULL) {
+      raw_free(_data);
+    }
+  }
+
+ public:
+  bool out_of_memory() const {
+    return (_data == NULL);
+  }
+
+  size_t instance_size() const {
+    return sizeof(MemPointerArrayImpl<E>) + _max_size * sizeof(E);
+  }
+
+  bool is_empty() const {
+    assert(_data != NULL, "Just check");
+    return _size == 0;
+  }
+
+  bool is_full() {
+    assert(_data != NULL, "Just check");
+    if (_size < _max_size) {
+      return false;
+    } else {
+      return !expand_array();
+    }
+  }
+
+  int length() const {
+    assert(_data != NULL, "Just check");
+    return _size;
+  }
+
+  debug_only(int capacity() const { return _max_size; })
+
+  void clear() {
+    assert(_data != NULL, "Just check");
+    _size = 0;
+  }
+
+  bool append(MemPointer* ptr) {
+    assert(_data != NULL, "Just check");
+    if (is_full()) {
+      return false;
+    }
+    _data[_size ++] = *(E*)ptr;
+    return true;
+  }
+
+  bool insert_at(MemPointer* ptr, int pos) {
+    assert(_data != NULL, "Just check");
+    if (is_full()) {
+      return false;
+    }
+    for (int index = _size; index > pos; index --) {
+      _data[index] = _data[index - 1];
+    }
+    _data[pos] = *(E*)ptr;
+    _size ++;
+    return true;
+  }
+
+  bool remove_at(int pos) {
+    assert(_data != NULL, "Just check");
+    if (pos < 0 || pos >= _size) {
+      return false;
+    }
+    -- _size;
+
+    for (int index = pos; index < _size; index ++) {
+      _data[index] = _data[index + 1];
+    }
+    return true;
+  }
+
+  MemPointer* at(int index) const {
+    assert(_data != NULL, "Just check");
+    assert(index >= 0 && index < _size, "illegal index");
+    return &_data[index];
+  }
+
+  bool shrink() {
+    float used = ((float)_size) / ((float)_max_size);
+    if (used < 0.40) {
+      E* old_ptr = _data;
+      int new_size = ((_max_size) / (2 * DEFAULT_PTR_ARRAY_SIZE) + 1) * DEFAULT_PTR_ARRAY_SIZE;
+      _data = (E*)raw_reallocate(_data, sizeof(E), new_size);
+      if (_data == NULL) {
+        _data = old_ptr;
+        return false;
+      } else {
+        _max_size = new_size;
+        return true;
+      }
+    }
+    return false;
+  }
+
+  void sort(FN_SORT fn) {
+    assert(_data != NULL, "Just check");
+    qsort((void*)_data, _size, sizeof(E), fn);
+  }
+
+ private:
+  bool  expand_array() {
+    assert(_data != NULL, "Not yet allocated");
+    E* old_ptr = _data;
+    if ((_data = (E*)raw_reallocate((void*)_data, sizeof(E),
+      _max_size + DEFAULT_PTR_ARRAY_SIZE)) == NULL) {
+      _data = old_ptr;
+      return false;
+    } else {
+      _max_size += DEFAULT_PTR_ARRAY_SIZE;
+      if (_init_elements) {
+        for (int index = _size; index < _max_size; index ++) {
+          ::new ((void*)&_data[index]) E();
+        }
+      }
+      return true;
+    }
+  }
+
+  void* raw_allocate(size_t elementSize, int items) {
+    return os::malloc(elementSize * items, mtNMT);
+  }
+
+  void* raw_reallocate(void* ptr, size_t elementSize, int items) {
+    return os::realloc(ptr, elementSize * items, mtNMT);
+  }
+
+  void  raw_free(void* ptr) {
+    os::free(ptr, mtNMT);
+  }
+};
+
+#endif // SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
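For orientation, here is a minimal standalone sketch of the grow/shrink policy that MemPointerArrayImpl implements above: the backing store grows by one DEFAULT_PTR_ARRAY_SIZE chunk when it fills up, and shrinks back to a chunk-aligned capacity once utilization drops below 40%. This sketch is not part of the patch; DemoPtrArray is a hypothetical name, and plain malloc/realloc/free stand in for the os::malloc/os::realloc/os::free wrappers the real code uses.

#include <cstdlib>

class DemoPtrArray {
  static const int kChunk = 1024;   // mirrors DEFAULT_PTR_ARRAY_SIZE
  void** _data;
  int    _size;
  int    _max_size;

 public:
  DemoPtrArray() : _size(0), _max_size(kChunk) {
    _data = (void**)malloc(sizeof(void*) * _max_size);
  }
  ~DemoPtrArray() { free(_data); }

  // append, expanding by one chunk when the array is full
  bool append(void* p) {
    if (_size == _max_size && !expand()) return false;
    _data[_size++] = p;
    return true;
  }

  // shrink to a chunk-aligned capacity once utilization falls below 40%
  bool shrink() {
    if ((float)_size / (float)_max_size >= 0.40f) return false;
    int new_size = (_max_size / (2 * kChunk) + 1) * kChunk;
    void** p = (void**)realloc(_data, sizeof(void*) * new_size);
    if (p == NULL) return false;      // keep the old buffer on failure
    _data = p;
    _max_size = new_size;
    return true;
  }

 private:
  bool expand() {
    void** p = (void**)realloc(_data, sizeof(void*) * (_max_size + kChunk));
    if (p == NULL) return false;      // out of memory: array stays usable
    _data = p;
    _max_size += kChunk;
    return true;
  }
};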
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memRecorder.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "runtime/atomic.hpp"
+#include "services/memBaseline.hpp"
+#include "services/memRecorder.hpp"
+#include "services/memPtr.hpp"
+#include "services/memTracker.hpp"
+
+MemPointer* SequencedRecordIterator::next_record() {
+  MemPointer* itr_cur = _itr.current();
+  if (itr_cur == NULL) return NULL;
+  MemPointer* itr_next = _itr.next();
+
+  while (itr_next != NULL &&
+    same_kind((MemPointerRecord*)itr_cur, (MemPointerRecord*)itr_next)) {
+    itr_cur = itr_next;
+    itr_next = _itr.next();
+  }
+
+  return itr_cur;
+}
+
+
+debug_only(volatile jint MemRecorder::_instance_count = 0;)
+
+MemRecorder::MemRecorder() {
+  assert(MemTracker::is_on(), "Native memory tracking is off");
+  debug_only(Atomic::inc(&_instance_count);)
+  debug_only(set_generation();)
+
+  if (MemTracker::track_callsite()) {
+    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecordEx,
+        DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
+  } else {
+    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecord,
+        DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
+  }
+  _next = NULL;
+
+
+  if (_pointer_records != NULL) {
+    // record itself
+    record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
+        sizeof(MemRecorder), CALLER_PC);
+    record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
+        _pointer_records->instance_size(),CURRENT_PC);
+  }
+}
+
+MemRecorder::~MemRecorder() {
+  if (_pointer_records != NULL) {
+    if (MemTracker::is_on()) {
+      MemTracker::record_free((address)_pointer_records, mtNMT);
+      MemTracker::record_free((address)this, mtNMT);
+    }
+    delete _pointer_records;
+  }
+  if (_next != NULL) {
+    delete _next;
+  }
+
+#ifdef ASSERT
+  Atomic::dec(&_instance_count);
+#endif
+}
+
+// Sorting order:
+//   1. memory block address
+//   2. mem pointer record tags
+//   3. sequence number
+int MemRecorder::sort_record_fn(const void* e1, const void* e2) {
+  const MemPointerRecord* p1 = (const MemPointerRecord*)e1;
+  const MemPointerRecord* p2 = (const MemPointerRecord*)e2;
+  int delta = UNSIGNED_COMPARE(p1->addr(), p2->addr());
+  if (delta == 0) {
+    int df = UNSIGNED_COMPARE((p1->flags() & MemPointerRecord::tag_masks),
+                              (p2->flags() & MemPointerRecord::tag_masks));
+    if (df == 0) {
+      assert(p1->seq() != p2->seq(), "dup seq");
+      return p1->seq() - p2->seq();
+    } else {
+      return df;
+    }
+  } else {
+    return delta;
+  }
+}
+
+bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, address pc) {
+#ifdef ASSERT
+  if (MemPointerRecord::is_virtual_memory_record(flags)) {
+    assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record");
+  } else {
+    assert((flags & MemPointerRecord::tag_masks) == MemPointerRecord::malloc_tag() ||
+           (flags & MemPointerRecord::tag_masks) == MemPointerRecord::free_tag() ||
+           IS_ARENA_OBJ(flags),
+           "bad malloc record");
+  }
+  // a recorder should only hold records within the same generation
+  unsigned long cur_generation = SequenceGenerator::current_generation();
+  assert(cur_generation == _generation,
+         "this thread did not enter sync point");
+#endif
+
+  if (MemTracker::track_callsite()) {
+    SeqMemPointerRecordEx ap(p, flags, size, pc);
+    debug_only(check_dup_seq(ap.seq());)
+    return _pointer_records->append(&ap);
+  } else {
+    SeqMemPointerRecord ap(p, flags, size);
+    debug_only(check_dup_seq(ap.seq());)
+    return _pointer_records->append(&ap);
+  }
+}
+
+// iterator for alloc pointers
+SequencedRecordIterator MemRecorder::pointer_itr() {
+  assert(_pointer_records != NULL, "just check");
+  _pointer_records->sort((FN_SORT)sort_record_fn);
+  return SequencedRecordIterator(_pointer_records);
+}
+
+
+#ifdef ASSERT
+void MemRecorder::set_generation() {
+  _generation = SequenceGenerator::current_generation();
+}
+
+void MemRecorder::check_dup_seq(jint seq) const {
+  MemPointerArrayIteratorImpl itr(_pointer_records);
+  MemPointerRecord* rc = (MemPointerRecord*)itr.current();
+  while (rc != NULL) {
+    assert(rc->seq() != seq, "dup seq");
+    rc = (MemPointerRecord*)itr.next();
+  }
+}
+
+#endif
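The comparator above orders records by (address, tag, sequence number), and SequencedRecordIterator then walks the sorted array and keeps only the record with the highest sequence number for each (address, tag) pair. The following standalone sketch shows the same collapse over a std::vector; it is illustrative only, and DemoRecord/collapse are hypothetical names rather than types from the patch.

#include <algorithm>
#include <cstddef>
#include <vector>

struct DemoRecord {
  unsigned long addr;   // memory block address
  int           tag;    // operation kind, e.g. malloc vs. free
  int           seq;    // global sequence number
};

// same ordering idea as the sort_record_fn comparator: address, then tag, then sequence
static bool demo_order(const DemoRecord& a, const DemoRecord& b) {
  if (a.addr != b.addr) return a.addr < b.addr;
  if (a.tag  != b.tag)  return a.tag  < b.tag;
  return a.seq < b.seq;
}

// keep only the latest record of each (addr, tag) kind
static std::vector<DemoRecord> collapse(std::vector<DemoRecord> recs) {
  std::sort(recs.begin(), recs.end(), demo_order);
  std::vector<DemoRecord> out;
  for (std::size_t i = 0; i < recs.size(); i++) {
    // emit a record only when the next one belongs to a different kind
    if (i + 1 == recs.size() ||
        recs[i].addr != recs[i + 1].addr ||
        recs[i].tag  != recs[i + 1].tag) {
      out.push_back(recs[i]);
    }
  }
  return out;
}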
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memRecorder.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MEM_RECORDER_HPP
+#define SHARE_VM_SERVICES_MEM_RECORDER_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/os.hpp"
+#include "services/memPtrArray.hpp"
+
+class MemSnapshot;
+class MemTracker;
+class MemTrackWorker;
+
+// Fixed size memory pointer array implementation
+template <class E, int SIZE> class FixedSizeMemPointerArray :
+  public MemPointerArray {
+  // This implementation is for memory recorder only
+  friend class MemRecorder;
+
+ private:
+  E      _data[SIZE];
+  int    _size;
+
+ protected:
+  FixedSizeMemPointerArray(bool init_elements = false):
+   _size(0){
+    if (init_elements) {
+      for (int index = 0; index < SIZE; index ++) {
+        ::new ((void*)&_data[index]) E();
+      }
+    }
+  }
+
+  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
+    // the instance is part of MemRecorder and needs to be tagged with 'otNMTRecorder'
+    // to avoid recursion
+    return os::malloc(size, (mtNMT | otNMTRecorder));
+  }
+
+  void* operator new(size_t size) {
+    assert(false, "use nothrow version");
+    return NULL;
+  }
+
+  void operator delete(void* p) {
+    os::free(p, (mtNMT | otNMTRecorder));
+  }
+
+  // instance size
+  inline size_t instance_size() const {
+    return sizeof(FixedSizeMemPointerArray<E, SIZE>);
+  }
+
+  debug_only(int capacity() const { return SIZE; })
+
+ public:
+  // implementation of public interface
+  bool out_of_memory() const { return false; }
+  bool is_empty()      const { return _size == 0; }
+  bool is_full()             { return length() >= SIZE; }
+  int  length()        const { return _size; }
+
+  void clear() {
+    _size = 0;
+  }
+
+  bool append(MemPointer* ptr) {
+    if (is_full()) return false;
+    _data[_size ++] = *(E*)ptr;
+    return true;
+  }
+
+  virtual bool insert_at(MemPointer* p, int pos) {
+    assert(false, "append only");
+    return false;
+  }
+
+  virtual bool remove_at(int pos) {
+    assert(false, "not supported");
+    return false;
+  }
+
+  MemPointer* at(int index) const {
+    assert(index >= 0 && index < length(),
+      "parameter check");
+    return ((E*)&_data[index]);
+  }
+
+  void sort(FN_SORT fn) {
+    qsort((void*)_data, _size, sizeof(E), fn);
+  }
+
+  bool shrink() {
+    return false;
+  }
+};
+
+
+// This iterator requires a pre-sorted MemPointerArray, sorted by:
+//  1. address
+//  2. allocation type
+//  3. sequence number
+// While walking the array, the iterator collapses pointers with the same
+// address and allocation type, and returns only the one with the highest
+// sequence number.
+//
+// This is a read-only iterator; its update methods assert.
+class SequencedRecordIterator : public MemPointerArrayIterator {
+ private:
+   MemPointerArrayIteratorImpl _itr;
+   MemPointer*                 _cur;
+
+ public:
+  SequencedRecordIterator(const MemPointerArray* arr):
+    _itr(const_cast<MemPointerArray*>(arr)) {
+    _cur = next_record();
+  }
+
+  SequencedRecordIterator(const SequencedRecordIterator& itr):
+    _itr(itr._itr) {
+    _cur = next_record();
+  }
+
+  // return the pointer at current position
+  virtual MemPointer* current() const {
+    return _cur;
+  };
+
+  // return the next pointer and advance current position
+  virtual MemPointer* next() {
+    _cur = next_record();
+    return _cur;
+  }
+
+  // return the next pointer without advancing current position
+  virtual MemPointer* peek_next() const {
+    assert(false, "not implemented");
+    return NULL;
+  }
+
+  // return the previous pointer without changing current position
+  virtual MemPointer* peek_prev() const {
+    assert(false, "not implemented");
+    return NULL;
+  }
+
+  // remove the pointer at current position
+  virtual void remove() {
+    assert(false, "read-only iterator");
+  };
+  // insert the pointer at current position
+  virtual bool insert(MemPointer* ptr) {
+    assert(false, "read-only iterator");
+    return false;
+  }
+
+  virtual bool insert_after(MemPointer* ptr) {
+    assert(false, "read-only iterator");
+    return false;
+  }
+ private:
+  // collapse records of the same kind, and return the record of that
+  // kind with the highest sequence number
+  MemPointer* next_record();
+
+  // Test if the two records are the same kind: the same memory block and allocation
+  // type.
+  inline bool same_kind(const MemPointerRecord* p1, const MemPointerRecord* p2) const {
+    return (p1->addr() == p2->addr() &&
+      (p1->flags() & MemPointerRecord::tag_masks) ==
+      (p2->flags() & MemPointerRecord::tag_masks));
+  }
+};
+
+
+
+#define DEFAULT_RECORDER_PTR_ARRAY_SIZE 512
+
+class MemRecorder : public CHeapObj<mtNMT|otNMTRecorder> {
+  friend class MemSnapshot;
+  friend class MemTracker;
+  friend class MemTrackWorker;
+
+ protected:
+  // the array that holds memory records
+  MemPointerArray*         _pointer_records;
+
+ private:
+  // used for linked list
+  MemRecorder*             _next;
+  // an active recorder can only record data of a certain generation
+  debug_only(unsigned long _generation;)
+
+ protected:
+  _NOINLINE_ MemRecorder();
+  ~MemRecorder();
+
+  // record a memory operation
+  bool record(address addr, MEMFLAGS flags, size_t size, address caller_pc = 0);
+
+  // linked list support
+  inline void set_next(MemRecorder* rec) {
+    _next = rec;
+  }
+
+  inline MemRecorder* next() const {
+    return _next;
+  }
+
+  // if the recorder is full
+  inline bool is_full() const {
+    assert(_pointer_records != NULL, "just check");
+    return _pointer_records->is_full();
+  }
+
+  // whether the recorder ran out of memory when initializing its
+  // internal data
+  inline bool out_of_memory() const {
+    return (_pointer_records == NULL ||
+      _pointer_records->out_of_memory());
+  }
+
+  inline void clear() {
+    assert(_pointer_records != NULL, "Just check");
+    _pointer_records->clear();
+  }
+
+  SequencedRecordIterator pointer_itr();
+
+ public:
+  // number of MemRecorder instances
+  debug_only(static volatile jint _instance_count;)
+
+ private:
+  // sorting function; sorts records into the following order:
+  // 1. memory address
+  // 2. allocation type
+  // 3. sequence number
+  static int sort_record_fn(const void* e1, const void* e2);
+
+  debug_only(void check_dup_seq(jint seq) const;)
+  debug_only(void set_generation();)
+};
+
+#endif // SHARE_VM_SERVICES_MEM_RECORDER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memReporter.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "runtime/os.hpp"
+#include "services/memReporter.hpp"
+#include "services/memPtrArray.hpp"
+#include "services/memTracker.hpp"
+
+const char* BaselineOutputer::memory_unit(size_t scale) {
+  switch(scale) {
+    case K: return "KB";
+    case M: return "MB";
+    case G: return "GB";
+  }
+  ShouldNotReachHere();
+  return NULL;
+}
+
+
+void BaselineReporter::report_baseline(const MemBaseline& baseline, bool summary_only) {
+  assert(MemTracker::is_on(), "Native memory tracking is off");
+  _outputer.start(scale());
+  _outputer.total_usage(
+    amount_in_current_scale(baseline.total_malloc_amount() + baseline.total_reserved_amount()),
+    amount_in_current_scale(baseline.total_malloc_amount() + baseline.total_committed_amount()));
+
+  _outputer.num_of_classes(baseline.number_of_classes());
+  _outputer.num_of_threads(baseline.number_of_threads());
+
+  report_summaries(baseline);
+  if (!summary_only && MemTracker::track_callsite()) {
+    report_callsites(baseline);
+  }
+  _outputer.done();
+}
+
+void BaselineReporter::report_summaries(const MemBaseline& baseline) {
+  _outputer.start_category_summary();
+  MEMFLAGS type;
+
+  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
+    type = MemBaseline::MemType2NameMap[index]._flag;
+    _outputer.category_summary(type,
+      amount_in_current_scale(baseline.reserved_amount(type)),
+      amount_in_current_scale(baseline.committed_amount(type)),
+      amount_in_current_scale(baseline.malloc_amount(type)),
+      baseline.malloc_count(type),
+      amount_in_current_scale(baseline.arena_amount(type)),
+      baseline.arena_count(type));
+  }
+
+  _outputer.done_category_summary();
+}
+
+void BaselineReporter::report_callsites(const MemBaseline& baseline) {
+  _outputer.start_callsite();
+  MemBaseline* pBL = const_cast<MemBaseline*>(&baseline);
+
+  pBL->_malloc_cs->sort((FN_SORT)MemBaseline::bl_malloc_sort_by_size);
+  pBL->_vm_cs->sort((FN_SORT)MemBaseline::bl_vm_sort_by_size);
+
+  // walk malloc callsites
+  MemPointerArrayIteratorImpl malloc_itr(pBL->_malloc_cs);
+  MallocCallsitePointer*      malloc_callsite =
+                  (MallocCallsitePointer*)malloc_itr.current();
+  while (malloc_callsite != NULL) {
+    _outputer.malloc_callsite(malloc_callsite->addr(),
+        amount_in_current_scale(malloc_callsite->amount()), malloc_callsite->count());
+    malloc_callsite = (MallocCallsitePointer*)malloc_itr.next();
+  }
+
+  // walk virtual memory callsites
+  MemPointerArrayIteratorImpl vm_itr(pBL->_vm_cs);
+  VMCallsitePointer*          vm_callsite = (VMCallsitePointer*)vm_itr.current();
+  while (vm_callsite != NULL) {
+    _outputer.virtual_memory_callsite(vm_callsite->addr(),
+      amount_in_current_scale(vm_callsite->reserved_amount()),
+      amount_in_current_scale(vm_callsite->committed_amount()));
+    vm_callsite = (VMCallsitePointer*)vm_itr.next();
+  }
+  pBL->_malloc_cs->sort((FN_SORT)MemBaseline::bl_malloc_sort_by_pc);
+  pBL->_vm_cs->sort((FN_SORT)MemBaseline::bl_vm_sort_by_pc);
+  _outputer.done_callsite();
+}
+
+void BaselineReporter::diff_baselines(const MemBaseline& cur, const MemBaseline& prev,
+  bool summary_only) {
+  assert(MemTracker::is_on(), "Native memory tracking is off");
+  _outputer.start(scale());
+  size_t total_reserved = cur.total_malloc_amount() + cur.total_reserved_amount();
+  size_t total_committed = cur.total_malloc_amount() + cur.total_committed_amount();
+
+  _outputer.diff_total_usage(
+    amount_in_current_scale(total_reserved), amount_in_current_scale(total_committed),
+    diff_in_current_scale(total_reserved,  (prev.total_malloc_amount() + prev.total_reserved_amount())),
+    diff_in_current_scale(total_committed, (prev.total_committed_amount() + prev.total_malloc_amount())));
+
+  _outputer.diff_num_of_classes(cur.number_of_classes(),
+       diff(cur.number_of_classes(), prev.number_of_classes()));
+  _outputer.diff_num_of_threads(cur.number_of_threads(),
+       diff(cur.number_of_threads(), prev.number_of_threads()));
+
+  diff_summaries(cur, prev);
+  if (!summary_only && MemTracker::track_callsite()) {
+    diff_callsites(cur, prev);
+  }
+  _outputer.done();
+}
+
+void BaselineReporter::diff_summaries(const MemBaseline& cur, const MemBaseline& prev) {
+  _outputer.start_category_summary();
+  MEMFLAGS type;
+
+  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
+    type = MemBaseline::MemType2NameMap[index]._flag;
+    _outputer.diff_category_summary(type,
+      amount_in_current_scale(cur.reserved_amount(type)),
+      amount_in_current_scale(cur.committed_amount(type)),
+      amount_in_current_scale(cur.malloc_amount(type)),
+      cur.malloc_count(type),
+      amount_in_current_scale(cur.arena_amount(type)),
+      cur.arena_count(type),
+      diff_in_current_scale(cur.reserved_amount(type), prev.reserved_amount(type)),
+      diff_in_current_scale(cur.committed_amount(type), prev.committed_amount(type)),
+      diff_in_current_scale(cur.malloc_amount(type), prev.malloc_amount(type)),
+      diff(cur.malloc_count(type), prev.malloc_count(type)),
+      diff_in_current_scale(cur.arena_amount(type), prev.arena_amount(type)),
+      diff(cur.arena_count(type), prev.arena_count(type)));
+  }
+
+  _outputer.done_category_summary();
+}
+
+void BaselineReporter::diff_callsites(const MemBaseline& cur, const MemBaseline& prev) {
+  _outputer.start_callsite();
+  MemBaseline* pBL_cur = const_cast<MemBaseline*>(&cur);
+  MemBaseline* pBL_prev = const_cast<MemBaseline*>(&prev);
+
+  // walk malloc callsites
+  MemPointerArrayIteratorImpl cur_malloc_itr(pBL_cur->_malloc_cs);
+  MemPointerArrayIteratorImpl prev_malloc_itr(pBL_prev->_malloc_cs);
+
+  MallocCallsitePointer*      cur_malloc_callsite =
+                  (MallocCallsitePointer*)cur_malloc_itr.current();
+  MallocCallsitePointer*      prev_malloc_callsite =
+                  (MallocCallsitePointer*)prev_malloc_itr.current();
+
+  while (cur_malloc_callsite != NULL || prev_malloc_callsite != NULL) {
+    if (prev_malloc_callsite == NULL ||
+        (cur_malloc_callsite != NULL &&
+         cur_malloc_callsite->addr() < prev_malloc_callsite->addr())) {
+      _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
+        amount_in_current_scale(cur_malloc_callsite->amount()),
+        cur_malloc_callsite->count(),
+        diff_in_current_scale(cur_malloc_callsite->amount(), 0),
+        diff(cur_malloc_callsite->count(), 0));
+      cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
+    } else if (cur_malloc_callsite == NULL ||
+               cur_malloc_callsite->addr() > prev_malloc_callsite->addr()) {
+      _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(),
+        amount_in_current_scale(prev_malloc_callsite->amount()),
+        prev_malloc_callsite->count(),
+        diff_in_current_scale(0, prev_malloc_callsite->amount()),
+        diff(0, prev_malloc_callsite->count()));
+      prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
+    } else {  // the same callsite
+      _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
+        amount_in_current_scale(cur_malloc_callsite->amount()),
+        cur_malloc_callsite->count(),
+        diff_in_current_scale(cur_malloc_callsite->amount(), prev_malloc_callsite->amount()),
+        diff(cur_malloc_callsite->count(), prev_malloc_callsite->count()));
+      cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
+      prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
+    }
+  }
+
+  // walk virtual memory callsites
+  MemPointerArrayIteratorImpl cur_vm_itr(pBL_cur->_vm_cs);
+  MemPointerArrayIteratorImpl prev_vm_itr(pBL_prev->_vm_cs);
+  VMCallsitePointer*          cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.current();
+  VMCallsitePointer*          prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.current();
+  while (cur_vm_callsite != NULL || prev_vm_callsite != NULL) {
+    if (prev_vm_callsite == NULL ||
+        (cur_vm_callsite != NULL && cur_vm_callsite->addr() < prev_vm_callsite->addr())) {
+      _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(),
+        amount_in_current_scale(cur_vm_callsite->reserved_amount()),
+        amount_in_current_scale(cur_vm_callsite->committed_amount()),
+        diff_in_current_scale(cur_vm_callsite->reserved_amount(), 0),
+        diff_in_current_scale(cur_vm_callsite->committed_amount(), 0));
+      cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.next();
+    } else if (cur_vm_callsite == NULL || cur_vm_callsite->addr() > prev_vm_callsite->addr()) {
+      _outputer.diff_virtual_memory_callsite(prev_vm_callsite->addr(),
+        amount_in_current_scale(prev_vm_callsite->reserved_amount()),
+        amount_in_current_scale(prev_vm_callsite->committed_amount()),
+        diff_in_current_scale(0, prev_vm_callsite->reserved_amount()),
+        diff_in_current_scale(0, prev_vm_callsite->committed_amount()));
+      prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next();
+    } else { // the same callsite
+      _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(),
+        amount_in_current_scale(cur_vm_callsite->reserved_amount()),
+        amount_in_current_scale(cur_vm_callsite->committed_amount()),
+        diff_in_current_scale(cur_vm_callsite->reserved_amount(), prev_vm_callsite->reserved_amount()),
+        diff_in_current_scale(cur_vm_callsite->committed_amount(), prev_vm_callsite->committed_amount()));
+      cur_vm_callsite  = (VMCallsitePointer*)cur_vm_itr.next();
+      prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next();
+    }
+  }
+
+  _outputer.done_callsite();
+}
+
+size_t BaselineReporter::amount_in_current_scale(size_t amt) const {
+  return (size_t)(((float)amt/(float)_scale) + 0.5);
+}
+
+int BaselineReporter::diff_in_current_scale(size_t value1, size_t value2) const {
+  return (int)(((float)value1 - (float)value2)/((float)_scale) + 0.5);
+}
+
+int BaselineReporter::diff(size_t value1, size_t value2) const {
+  return ((int)value1 - (int)value2);
+}
+
+void BaselineTTYOutputer::start(size_t scale, bool report_diff) {
+  _scale = scale;
+  _output->print_cr(" ");
+  _output->print_cr("Native Memory Tracking:");
+  _output->print_cr(" ");
+}
+
+void BaselineTTYOutputer::done() {
+
+}
+
+void BaselineTTYOutputer::total_usage(size_t total_reserved, size_t total_committed) {
+  const char* unit = memory_unit(_scale);
+  _output->print_cr("Total:  reserved=%d%s,  committed=%d%s",
+    total_reserved, unit, total_committed, unit);
+}
+
+void BaselineTTYOutputer::start_category_summary() {
+  _output->print_cr(" ");
+}
+
+/**
+ * report a summary for a memory type
+ */
+void BaselineTTYOutputer::category_summary(MEMFLAGS type,
+  size_t reserved_amt, size_t committed_amt, size_t malloc_amt,
+  size_t malloc_count, size_t arena_amt, size_t arena_count) {
+
+  // we report mtThreadStack under mtThread category
+  if (type == mtThreadStack) {
+    assert(malloc_amt == 0 && malloc_count == 0 && arena_amt == 0,
+      "Just check");
+    _thread_stack_reserved = reserved_amt;
+    _thread_stack_committed = committed_amt;
+  } else {
+    const char* unit = memory_unit(_scale);
+    size_t total_reserved = (reserved_amt + malloc_amt + arena_amt);
+    size_t total_committed = (committed_amt + malloc_amt + arena_amt);
+    if (type == mtThread) {
+      total_reserved += _thread_stack_reserved;
+      total_committed += _thread_stack_committed;
+    }
+
+    if (total_reserved > 0) {
+      _output->print_cr("-%26s (reserved=%d%s, committed=%d%s)",
+        MemBaseline::type2name(type), total_reserved, unit,
+        total_committed, unit);
+
+      if (type == mtClass) {
+        _output->print_cr("%27s (classes #%d)", " ", _num_of_classes);
+      } else if (type == mtThread) {
+        _output->print_cr("%27s (thread #%d)", " ", _num_of_threads);
+        _output->print_cr("%27s (stack: reserved=%d%s, committed=%d%s)", " ",
+          _thread_stack_reserved, unit, _thread_stack_committed, unit);
+      }
+
+      if (malloc_amt > 0) {
+        if (type != mtChunk) {
+          _output->print_cr("%27s (malloc=%d%s, #%d)", " ", malloc_amt, unit,
+            malloc_count);
+        } else {
+          _output->print_cr("%27s (malloc=%d%s)", " ", malloc_amt, unit);
+        }
+      }
+
+      if (reserved_amt > 0) {
+        _output->print_cr("%27s (mmap: reserved=%d%s, committed=%d%s)",
+          " ", reserved_amt, unit, committed_amt, unit);
+      }
+
+      if (arena_amt > 0) {
+        _output->print_cr("%27s (arena=%d%s, #%d)", " ", arena_amt, unit, arena_count);
+      }
+
+      _output->print_cr(" ");
+    }
+  }
+}
+
+void BaselineTTYOutputer::done_category_summary() {
+  _output->print_cr(" ");
+}
+
+void BaselineTTYOutputer::start_callsite() {
+  _output->print_cr("Details:");
+  _output->print_cr(" ");
+}
+
+void BaselineTTYOutputer::done_callsite() {
+  _output->print_cr(" ");
+}
+
+void BaselineTTYOutputer::malloc_callsite(address pc, size_t malloc_amt,
+  size_t malloc_count) {
+  if (malloc_amt > 0) {
+    const char* unit = memory_unit(_scale);
+    char buf[64];
+    int  offset;
+    if (pc == 0) {
+      _output->print("[BOOTSTRAP]%18s", " ");
+    } else if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
+      _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
+      _output->print("%28s", " ");
+    } else {
+      _output->print("[" PTR_FORMAT "]%18s", pc, " ");
+    }
+
+    _output->print_cr("(malloc=%d%s #%d)", malloc_amt, unit, malloc_count);
+    _output->print_cr(" ");
+  }
+}
+
+void BaselineTTYOutputer::virtual_memory_callsite(address pc, size_t reserved_amt,
+  size_t committed_amt) {
+  if (reserved_amt > 0) {
+    const char* unit = memory_unit(_scale);
+    char buf[64];
+    int  offset;
+    if (pc == 0) {
+      _output->print("[BOOTSTRAP]%18s", " ");
+    } else if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
+      _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
+      _output->print("%28s", " ");
+    } else {
+      _output->print("[" PTR_FORMAT "]%18s", " ");
+    }
+
+    _output->print_cr("(mmap: reserved=%d%s, committed=%d%s)",
+      reserved_amt, unit, committed_amt, unit);
+    _output->print_cr(" ");
+  }
+}
+
+void BaselineTTYOutputer::diff_total_usage(size_t total_reserved,
+  size_t total_committed, int reserved_diff, int committed_diff) {
+  const char* unit = memory_unit(_scale);
+  _output->print_cr("Total:  reserved=%d%s  %+d%s, committed=%d%s %+d%s",
+    total_reserved, unit, reserved_diff, unit, total_committed, unit,
+    committed_diff, unit);
+}
+
+void BaselineTTYOutputer::diff_category_summary(MEMFLAGS type,
+  size_t cur_reserved_amt, size_t cur_committed_amt,
+  size_t cur_malloc_amt, size_t cur_malloc_count,
+  size_t cur_arena_amt, size_t cur_arena_count,
+  int reserved_diff, int committed_diff, int malloc_diff,
+  int malloc_count_diff, int arena_diff, int arena_count_diff) {
+
+  if (type == mtThreadStack) {
+    assert(cur_malloc_amt == 0 && cur_malloc_count == 0 &&
+      cur_arena_amt == 0, "Just check");
+    _thread_stack_reserved = cur_reserved_amt;
+    _thread_stack_committed = cur_committed_amt;
+    _thread_stack_reserved_diff = reserved_diff;
+    _thread_stack_committed_diff = committed_diff;
+  } else {
+    const char* unit = memory_unit(_scale);
+    size_t total_reserved = (cur_reserved_amt + cur_malloc_amt + cur_arena_amt);
+    // nothing to report in this category
+    if (total_reserved == 0) {
+      return;
+    }
+    int    diff_reserved = (reserved_diff + malloc_diff + arena_diff);
+
+    // category summary
+    _output->print("-%26s (reserved=%d%s", MemBaseline::type2name(type),
+      total_reserved, unit);
+
+    if (diff_reserved != 0) {
+      _output->print(" %+d%s", diff_reserved, unit);
+    }
+
+    size_t total_committed = cur_committed_amt + cur_malloc_amt + cur_arena_amt;
+    _output->print(", committed=%d%s", total_committed, unit);
+
+    int total_committed_diff = committed_diff + malloc_diff + arena_diff;
+    if (total_committed_diff != 0) {
+      _output->print(" %+d%s", total_committed_diff, unit);
+    }
+
+    _output->print_cr(")");
+
+    // special cases
+    if (type == mtClass) {
+      _output->print("%27s (classes #%d", " ", _num_of_classes);
+      if (_num_of_classes_diff != 0) {
+        _output->print(" %+d", _num_of_classes_diff);
+      }
+      _output->print_cr(")");
+    } else if (type == mtThread) {
+      // thread count
+      _output->print("%27s (thread #%d", " ", _num_of_threads);
+      if (_num_of_threads_diff != 0) {
+        _output->print_cr(" %+d)", _num_of_threads_diff);
+      } else {
+        _output->print_cr(")");
+      }
+      _output->print("%27s (stack: reserved=%d%s", " ", _thread_stack_reserved, unit);
+      if (_thread_stack_reserved_diff != 0) {
+        _output->print(" %+d%s", _thread_stack_reserved_diff, unit);
+      }
+
+      _output->print(", committed=%d%s", _thread_stack_committed, unit);
+      if (_thread_stack_committed_diff != 0) {
+        _output->print(" %+d%s",_thread_stack_committed_diff, unit);
+      }
+
+      _output->print_cr(")");
+    }
+
+    // malloc'd memory
+    if (cur_malloc_amt > 0) {
+      _output->print("%27s (malloc=%d%s", " ", cur_malloc_amt, unit);
+      if (malloc_diff != 0) {
+        _output->print(" %+d%s", malloc_diff, unit);
+      }
+      if (type != mtChunk) {
+        _output->print(", #%d", cur_malloc_count);
+        if (malloc_count_diff) {
+          _output->print(" %+d", malloc_count_diff);
+        }
+      }
+      _output->print_cr(")");
+    }
+
+    // mmap'd memory
+    if (cur_reserved_amt > 0) {
+      _output->print("%27s (mmap: reserved=%d%s", " ", cur_reserved_amt, unit);
+      if (reserved_diff != 0) {
+        _output->print(" %+d%s", reserved_diff, unit);
+      }
+
+      _output->print(", committed=%d%s", cur_committed_amt, unit);
+      if (committed_diff != 0) {
+        _output->print(" %+d%s", committed_diff, unit);
+      }
+      _output->print_cr(")");
+    }
+
+    // arena memory
+    if (cur_arena_amt > 0) {
+      _output->print("%27s (arena=%d%s", " ", cur_arena_amt, unit);
+      if (arena_diff != 0) {
+        _output->print(" %+d%s", arena_diff, unit);
+      }
+      _output->print(", #%d", cur_arena_count);
+      if (arena_count_diff != 0) {
+        _output->print(" %+d", arena_count_diff);
+      }
+      _output->print_cr(")");
+    }
+
+    _output->print_cr(" ");
+  }
+}
+
+void BaselineTTYOutputer::diff_malloc_callsite(address pc,
+    size_t cur_malloc_amt, size_t cur_malloc_count,
+    int malloc_diff, int malloc_count_diff) {
+  if (malloc_diff != 0) {
+    const char* unit = memory_unit(_scale);
+    char buf[64];
+    int  offset;
+    if (pc == 0) {
+      _output->print_cr("[BOOTSTRAP]%18s", " ");
+    } else {
+      if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
+        _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
+        _output->print("%28s", " ");
+      } else {
+        _output->print("[" PTR_FORMAT "]%18s", pc, " ");
+      }
+    }
+
+    _output->print("(malloc=%d%s", cur_malloc_amt, unit);
+    if (malloc_diff != 0) {
+      _output->print(" %+d%s", malloc_diff, unit);
+    }
+    _output->print(", #%d", cur_malloc_count);
+    if (malloc_count_diff != 0) {
+      _output->print(" %+d", malloc_count_diff);
+    }
+    _output->print_cr(")");
+    _output->print_cr(" ");
+  }
+}
+
+void BaselineTTYOutputer::diff_virtual_memory_callsite(address pc,
+    size_t cur_reserved_amt, size_t cur_committed_amt,
+    int reserved_diff, int committed_diff) {
+  if (reserved_diff != 0 || committed_diff != 0) {
+    const char* unit = memory_unit(_scale);
+    char buf[64];
+    int  offset;
+    if (pc == 0) {
+      _output->print_cr("[BOOSTRAP]%18s", " ");
+    } else {
+      if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
+        _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
+        _output->print("%28s", " ");
+      } else {
+        _output->print("[" PTR_FORMAT "]%18s", " ");
+      }
+    }
+
+    _output->print("(mmap: reserved=%d%s", cur_reserved_amt, unit);
+    if (reserved_diff != 0) {
+      _output->print(" %+d%s", reserved_diff, unit);
+    }
+    _output->print(", committed=%d%s", cur_committed_amt, unit);
+    if (committed_diff != 0) {
+      _output->print(" %+d%s", committed_diff, unit);
+    }
+    _output->print_cr(")");
+    _output->print_cr(" ");
+  }
+}
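The amount_in_current_scale() and diff_in_current_scale() helpers above round byte counts to the nearest unit of the report scale. Here is a small standalone sketch of that arithmetic with a few worked values; it is not part of the patch, and the names are illustrative.

#include <cstddef>
#include <cstdio>

static size_t amount_in_scale(size_t amt, size_t scale) {
  // round to the nearest unit, as amount_in_current_scale() does
  return (size_t)(((float)amt / (float)scale) + 0.5);
}

static int diff_in_scale(size_t v1, size_t v2, size_t scale) {
  return (int)(((float)v1 - (float)v2) / (float)scale + 0.5);
}

int main() {
  const size_t K = 1024;
  printf("%d\n", (int)amount_in_scale(1536, K));    // 2: 1.5KB rounds up to 2KB
  printf("%d\n", (int)amount_in_scale(100,  K));    // 0: sub-half-KB amounts round down
  printf("%d\n", diff_in_scale(4096, 2048, K));     // 2: usage grew by 2KB
  return 0;
}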
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memReporter.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MEM_REPORTER_HPP
+#define SHARE_VM_SERVICES_MEM_REPORTER_HPP
+
+#include "runtime/mutexLocker.hpp"
+#include "services/memBaseline.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/ostream.hpp"
+
+/*
+ * BaselineReporter reports data to this outputer class;
+ * BaselineOutputer is responsible for formatting, storing and redirecting
+ * the data to its final destination.
+ */
+class BaselineOutputer : public StackObj {
+ public:
+  // start to report memory usage in specified scale.
+  // if report_diff = true, the reporter reports baseline comparison
+  // information.
+
+  virtual void start(size_t scale, bool report_diff = false) = 0;
+  // Done reporting
+  virtual void done() = 0;
+
+  /* report baseline summary information */
+  virtual void total_usage(size_t total_reserved,
+                           size_t total_committed) = 0;
+  virtual void num_of_classes(size_t classes) = 0;
+  virtual void num_of_threads(size_t threads) = 0;
+
+  virtual void thread_info(size_t stack_reserved_amt, size_t stack_committed_amt) = 0;
+
+  /* report baseline summary comparison */
+  virtual void diff_total_usage(size_t total_reserved,
+                                size_t total_committed,
+                                int reserved_diff,
+                                int committed_diff) = 0;
+  virtual void diff_num_of_classes(size_t classes, int diff) = 0;
+  virtual void diff_num_of_threads(size_t threads, int diff) = 0;
+
+  virtual void diff_thread_info(size_t stack_reserved, size_t stack_committed,
+        int stack_reserved_diff, int stack_committed_diff) = 0;
+
+
+  /*
+   * Memory summary by memory type.
+   * For each memory type, the following summaries are reported:
+   *  - reserved amount, committed amount
+   *  - malloc'd amount, malloc count
+   *  - arena amount, arena count
+   */
+
+  // start reporting memory summary by memory type
+  virtual void start_category_summary() = 0;
+
+  virtual void category_summary(MEMFLAGS type, size_t reserved_amt,
+                                size_t committed_amt,
+                                size_t malloc_amt, size_t malloc_count,
+                                size_t arena_amt, size_t arena_count) = 0;
+
+  virtual void diff_category_summary(MEMFLAGS type, size_t cur_reserved_amt,
+                                size_t cur_committed_amt,
+                                size_t cur_malloc_amt, size_t cur_malloc_count,
+                                size_t cur_arena_amt, size_t cur_arena_count,
+                                int reserved_diff, int committed_diff, int malloc_diff,
+                                int malloc_count_diff, int arena_diff,
+                                int arena_count_diff) = 0;
+
+  virtual void done_category_summary() = 0;
+
+  /*
+   *  Report callsite information
+   */
+  virtual void start_callsite() = 0;
+  virtual void malloc_callsite(address pc, size_t malloc_amt, size_t malloc_count) = 0;
+  virtual void virtual_memory_callsite(address pc, size_t reserved_amt, size_t committed_amt) = 0;
+
+  virtual void diff_malloc_callsite(address pc, size_t cur_malloc_amt, size_t cur_malloc_count,
+              int malloc_diff, int malloc_count_diff) = 0;
+  virtual void diff_virtual_memory_callsite(address pc, size_t cur_reserved_amt, size_t cur_committed_amt,
+              int reserved_diff, int committed_diff) = 0;
+
+  virtual void done_callsite() = 0;
+
+  // return the unit string for the given scale: "KB", "MB" or "GB"
+  static const char* memory_unit(size_t scale);
+};
+
+/*
+ * This class reports processed data from a baseline, or
+ * the changes between two baselines.
+ */
+class BaselineReporter : public StackObj {
+ private:
+  BaselineOutputer&  _outputer;
+  size_t             _scale;
+
+ public:
+  // construct a reporter that reports memory usage
+  // in specified scale
+  BaselineReporter(BaselineOutputer& outputer, size_t scale = K):
+    _outputer(outputer) {
+    _scale = scale;
+  }
+  virtual void report_baseline(const MemBaseline& baseline, bool summary_only = false);
+  virtual void diff_baselines(const MemBaseline& cur, const MemBaseline& prev,
+                              bool summary_only = false);
+
+  void set_scale(size_t scale);
+  size_t scale() const { return _scale; }
+
+ private:
+  void report_summaries(const MemBaseline& baseline);
+  void report_callsites(const MemBaseline& baseline);
+
+  void diff_summaries(const MemBaseline& cur, const MemBaseline& prev);
+  void diff_callsites(const MemBaseline& cur, const MemBaseline& prev);
+
+  // calculate memory size in current memory scale
+  size_t amount_in_current_scale(size_t amt) const;
+  // diff two unsigned values in current memory scale
+  int    diff_in_current_scale(size_t value1, size_t value2) const;
+  // diff two unsigned values
+  int    diff(size_t value1, size_t value2) const;
+};
+
+/*
+ * tty output implementation. The native memory tracking
+ * DCmd uses this outputer.
+ */
+class BaselineTTYOutputer : public BaselineOutputer {
+ private:
+  size_t         _scale;
+
+  size_t         _num_of_classes;
+  size_t         _num_of_threads;
+  size_t         _thread_stack_reserved;
+  size_t         _thread_stack_committed;
+
+  int            _num_of_classes_diff;
+  int            _num_of_threads_diff;
+  int            _thread_stack_reserved_diff;
+  int            _thread_stack_committed_diff;
+
+  outputStream*  _output;
+
+ public:
+  BaselineTTYOutputer(outputStream* st) {
+    _scale = K;
+    _num_of_classes = 0;
+    _num_of_threads = 0;
+    _thread_stack_reserved = 0;
+    _thread_stack_committed = 0;
+    _num_of_classes_diff = 0;
+    _num_of_threads_diff = 0;
+    _thread_stack_reserved_diff = 0;
+    _thread_stack_committed_diff = 0;
+    _output = st;
+  }
+
+  // begin reporting memory usage in specified scale
+  void start(size_t scale, bool report_diff = false);
+  // done reporting
+  void done();
+
+  // total memory usage
+  void total_usage(size_t total_reserved,
+                   size_t total_committed);
+  // report total loaded classes
+  void num_of_classes(size_t classes) {
+    _num_of_classes = classes;
+  }
+
+  void num_of_threads(size_t threads) {
+    _num_of_threads = threads;
+  }
+
+  void thread_info(size_t stack_reserved_amt, size_t stack_committed_amt) {
+    _thread_stack_reserved = stack_reserved_amt;
+    _thread_stack_committed = stack_committed_amt;
+  }
+
+  void diff_total_usage(size_t total_reserved,
+                        size_t total_committed,
+                        int reserved_diff,
+                        int committed_diff);
+
+  void diff_num_of_classes(size_t classes, int diff) {
+    _num_of_classes = classes;
+    _num_of_classes_diff = diff;
+  }
+
+  void diff_num_of_threads(size_t threads, int diff) {
+    _num_of_threads = threads;
+    _num_of_threads_diff = diff;
+  }
+
+  void diff_thread_info(size_t stack_reserved_amt, size_t stack_committed_amt,
+               int stack_reserved_diff, int stack_committed_diff) {
+    _thread_stack_reserved = stack_reserved_amt;
+    _thread_stack_committed = stack_committed_amt;
+    _thread_stack_reserved_diff = stack_reserved_diff;
+    _thread_stack_committed_diff = stack_committed_diff;
+  }
+
+  /*
+   * Report memory summary categorized by memory type.
+   * For each memory type, the following summaries are reported:
+   *  - reserved amount, committed amount
+   *  - malloc'd amount, malloc count
+   *  - arena amount, arena count
+   */
+  // start reporting memory summary by memory type
+  void start_category_summary();
+  void category_summary(MEMFLAGS type, size_t reserved_amt, size_t committed_amt,
+                               size_t malloc_amt, size_t malloc_count,
+                               size_t arena_amt, size_t arena_count);
+
+  void diff_category_summary(MEMFLAGS type, size_t cur_reserved_amt,
+                          size_t cur_committed_amt,
+                          size_t cur_malloc_amt, size_t cur_malloc_count,
+                          size_t cur_arena_amt, size_t cur_arena_count,
+                          int reserved_diff, int committed_diff, int malloc_diff,
+                          int malloc_count_diff, int arena_diff,
+                          int arena_count_diff);
+
+  void done_category_summary();
+
+  /*
+   *  Report callsite information
+   */
+  void start_callsite();
+  void malloc_callsite(address pc, size_t malloc_amt, size_t malloc_count);
+  void virtual_memory_callsite(address pc, size_t reserved_amt, size_t committed_amt);
+
+  void diff_malloc_callsite(address pc, size_t cur_malloc_amt, size_t cur_malloc_count,
+              int malloc_diff, int malloc_count_diff);
+  void diff_virtual_memory_callsite(address pc, size_t cur_reserved_amt, size_t cur_committed_amt,
+              int reserved_diff, int committed_diff);
+
+  void done_callsite();
+};
+
+
+#endif // SHARE_VM_SERVICES_MEM_REPORTER_HPP
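As a rough usage sketch, assuming a caller inside the VM with native memory tracking enabled and a MemBaseline (here called baseline; cur_baseline and prev_baseline are likewise placeholders) that has already been baselined elsewhere, the classes declared above are intended to be combined like this:

  BaselineTTYOutputer outputer(tty);           // format the report onto the tty stream
  BaselineReporter    reporter(outputer, K);   // report amounts in KB
  reporter.report_baseline(baseline);          // summary plus callsite details
  // or, to compare against a previous baseline (summary only):
  // reporter.diff_baselines(cur_baseline, prev_baseline, true);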
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memSnapshot.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "utilities/decoder.hpp"
+#include "services/memBaseline.hpp"
+#include "services/memPtr.hpp"
+#include "services/memPtrArray.hpp"
+#include "services/memSnapshot.hpp"
+#include "services/memTracker.hpp"
+
+
+// Staging data groups the records of a VM memory range, so we can consolidate
+// them into one record during the walk
+bool StagingWalker::consolidate_vm_records(VMMemRegionEx* vm_rec) {
+  MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
+  assert(cur != NULL && cur->is_vm_pointer(), "not a virtual memory pointer");
+
+  jint cur_seq;
+  jint next_seq;
+
+  bool trackCallsite = MemTracker::track_callsite();
+
+  if (trackCallsite) {
+    vm_rec->init((MemPointerRecordEx*)cur);
+    cur_seq = ((SeqMemPointerRecordEx*)cur)->seq();
+  } else {
+    vm_rec->init((MemPointerRecord*)cur);
+    cur_seq = ((SeqMemPointerRecord*)cur)->seq();
+  }
+
+  // we can only consolidate when we have an allocation record,
+  // which contains the virtual memory range
+  if (!cur->is_allocation_record()) {
+    _itr.next();
+    return true;
+  }
+
+  // allocation range
+  address base = cur->addr();
+  address end = base + cur->size();
+
+  MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
+  // if the memory range is alive
+  bool live_vm_rec = true;
+  while (next != NULL && next->is_vm_pointer()) {
+    if (next->is_allocation_record()) {
+      assert(next->addr() >= base, "sorting order or overlapping");
+      break;
+    }
+
+    if (trackCallsite) {
+      next_seq = ((SeqMemPointerRecordEx*)next)->seq();
+    } else {
+      next_seq = ((SeqMemPointerRecord*)next)->seq();
+    }
+
+    if (next_seq < cur_seq) {
+      _itr.next();
+      next = (MemPointerRecord*)_itr.peek_next();
+      continue;
+    }
+
+    if (next->is_deallocation_record()) {
+      if (next->addr() == base && next->size() == cur->size()) {
+        // the virtual memory range has been released
+        _itr.next();
+        live_vm_rec = false;
+        break;
+      } else if (next->addr() < end) { // partial release
+        vm_rec->partial_release(next->addr(), next->size());
+        _itr.next();
+      } else {
+        break;
+      }
+    } else if (next->is_commit_record()) {
+      if (next->addr() >= base && next->addr() + next->size() <= end) {
+        vm_rec->commit(next->size());
+        _itr.next();
+      } else {
+        assert(next->addr() >= base, "sorting order or overlapping");
+        break;
+      }
+    } else if (next->is_uncommit_record()) {
+      if (next->addr() >= base && next->addr() + next->size() <= end) {
+        vm_rec->uncommit(next->size());
+        _itr.next();
+      } else {
+        assert(next->addr() >= end, "sorting order or overlapping");
+        break;
+      }
+    } else if (next->is_type_tagging_record()) {
+      if (next->addr() >= base && next->addr() < end) {
+        vm_rec->tag(next->flags());
+        _itr.next();
+      } else {
+        break;
+      }
+    } else {
+      assert(false, "unknown record type");
+    }
+    next = (MemPointerRecord*)_itr.peek_next();
+  }
+  _itr.next();
+  return live_vm_rec;
+}
+
+MemPointer* StagingWalker::next() {
+  MemPointerRecord* cur_p = (MemPointerRecord*)_itr.current();
+  if (cur_p == NULL) {
+    _end_of_array = true;
+    return NULL;
+  }
+
+  MemPointerRecord* next_p;
+  if (cur_p->is_vm_pointer()) {
+    _is_vm_record = true;
+    if (!consolidate_vm_records(&_vm_record)) {
+      return next();
+    }
+  } else { // malloc-ed pointer
+    _is_vm_record = false;
+    next_p = (MemPointerRecord*)_itr.peek_next();
+    if (next_p != NULL && next_p->addr() == cur_p->addr()) {
+      assert(cur_p->is_allocation_record(), "sorting order");
+      assert(!next_p->is_allocation_record(), "sorting order");
+      _itr.next();
+      if (cur_p->seq() < next_p->seq()) {
+        cur_p = next_p;
+      }
+    }
+    if (MemTracker::track_callsite()) {
+      _malloc_record.init((MemPointerRecordEx*)cur_p);
+    } else {
+      _malloc_record.init((MemPointerRecord*)cur_p);
+    }
+
+    _itr.next();
+  }
+  return current();
+}
+
+MemSnapshot::MemSnapshot() {
+  if (MemTracker::track_callsite()) {
+    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
+    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegionEx>(64, true);
+    _staging_area = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
+  } else {
+    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
+    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegion>(64, true);
+    _staging_area = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
+  }
+
+  _lock = new (std::nothrow) Mutex(Monitor::native, "memSnapshotLock");
+  NOT_PRODUCT(_untracked_count = 0;)
+}
+
+MemSnapshot::~MemSnapshot() {
+  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
+  {
+    MutexLockerEx locker(_lock);
+    if (_staging_area != NULL) {
+      delete _staging_area;
+      _staging_area = NULL;
+    }
+
+    if (_alloc_ptrs != NULL) {
+      delete _alloc_ptrs;
+      _alloc_ptrs = NULL;
+    }
+
+    if (_vm_ptrs != NULL) {
+      delete _vm_ptrs;
+      _vm_ptrs = NULL;
+    }
+  }
+
+  if (_lock != NULL) {
+    delete _lock;
+    _lock = NULL;
+  }
+}
+
+void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
+  assert(dest != NULL && src != NULL, "Just check");
+  assert(dest->addr() == src->addr(), "Just check");
+
+  MEMFLAGS flags = dest->flags();
+
+  if (MemTracker::track_callsite()) {
+    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
+  } else {
+    *dest = *src;
+  }
+}
+
+
+// merge a per-thread memory recorder into the staging area
+bool MemSnapshot::merge(MemRecorder* rec) {
+  assert(rec != NULL && !rec->out_of_memory(), "Just check");
+
+  // out of memory
+  if (_staging_area == NULL || _staging_area->out_of_memory()) {
+    return false;
+  }
+
+  SequencedRecordIterator itr(rec->pointer_itr());
+
+  MutexLockerEx lock(_lock, true);
+  MemPointerIterator staging_itr(_staging_area);
+  MemPointerRecord *p1, *p2;
+  p1 = (MemPointerRecord*) itr.current();
+  while (p1 != NULL) {
+    p2 = (MemPointerRecord*)staging_itr.locate(p1->addr());
+    // we have not seen this memory block, so just add to staging area
+    if (p2 == NULL) {
+      if (!staging_itr.insert(p1)) {
+        return false;
+      }
+    } else if (p1->addr() == p2->addr()) {
+      MemPointerRecord* staging_next = (MemPointerRecord*)staging_itr.peek_next();
+      // a memory block can have many tagging records; find the right one to
+      // replace, or the right position to insert
+      while (staging_next != NULL && staging_next->addr() == p1->addr()) {
+        if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
+          (p1->flags() & MemPointerRecord::tag_masks)) {
+          p2 = (MemPointerRecord*)staging_itr.next();
+          staging_next = (MemPointerRecord*)staging_itr.peek_next();
+        } else {
+          break;
+        }
+      }
+      int df = (p1->flags() & MemPointerRecord::tag_masks) -
+        (p2->flags() & MemPointerRecord::tag_masks);
+      if (df == 0) {
+        assert(p1->seq() > 0, "not sequenced");
+        assert(p2->seq() > 0, "not sequenced");
+        if (p1->seq() > p2->seq()) {
+          copy_pointer(p2, p1);
+        }
+      } else if (df < 0) {
+        if (!staging_itr.insert(p1)) {
+          return false;
+        }
+      } else {
+        if (!staging_itr.insert_after(p1)) {
+          return false;
+        }
+      }
+    } else if (p1->addr() < p2->addr()) {
+      if (!staging_itr.insert(p1)) {
+        return false;
+      }
+    } else {
+      if (!staging_itr.insert_after(p1)) {
+        return false;
+      }
+    }
+    p1 = (MemPointerRecord*)itr.next();
+  }
+  NOT_PRODUCT(check_staging_data();)
+  return true;
+}
+
+
+
+// promote data to next generation
+void MemSnapshot::promote() {
+  assert(_alloc_ptrs != NULL && _staging_area != NULL && _vm_ptrs != NULL,
+    "Just check");
+  MutexLockerEx lock(_lock, true);
+  StagingWalker walker(_staging_area);
+  MemPointerIterator malloc_itr(_alloc_ptrs);
+  VMMemPointerIterator vm_itr(_vm_ptrs);
+  MemPointer* cur = walker.current();
+  while (cur != NULL) {
+    if (walker.is_vm_record()) {
+      VMMemRegion* cur_vm = (VMMemRegion*)cur;
+      VMMemRegion* p = (VMMemRegion*)vm_itr.locate(cur_vm->addr());
+      cur_vm = (VMMemRegion*)cur;
+      if (p != NULL && (p->contains(cur_vm) || p->base() == cur_vm->base())) {
+        assert(p->is_reserve_record() ||
+          p->is_commit_record(), "wrong vm record type");
+        // resize existing reserved range
+        if (cur_vm->is_reserve_record() && p->base() == cur_vm->base()) {
+          assert(cur_vm->size() >= p->committed_size(), "incorrect resizing");
+          p->set_reserved_size(cur_vm->size());
+        } else if (cur_vm->is_commit_record()) {
+          p->commit(cur_vm->committed_size());
+        } else if (cur_vm->is_uncommit_record()) {
+          p->uncommit(cur_vm->committed_size());
+          if (!p->is_reserve_record() && p->committed_size() == 0) {
+            vm_itr.remove();
+          }
+        } else if (cur_vm->is_type_tagging_record()) {
+          p->tag(cur_vm->flags());
+        } else if (cur_vm->is_release_record()) {
+          if (cur_vm->base() == p->base() && cur_vm->size() == p->size()) {
+            // release the whole range
+            vm_itr.remove();
+          } else {
+            // partial release
+            p->partial_release(cur_vm->base(), cur_vm->size());
+          }
+        } else {
+          // we do see multiple reservations on the same vm range
+          assert((cur_vm->is_commit_record() || cur_vm->is_reserve_record()) &&
+             cur_vm->base() == p->base() && cur_vm->size() == p->size(), "bad record");
+          p->tag(cur_vm->flags());
+        }
+      } else {
+        if(cur_vm->is_reserve_record()) {
+          if (p == NULL || p->base() > cur_vm->base()) {
+            vm_itr.insert(cur_vm);
+          } else {
+            vm_itr.insert_after(cur_vm);
+          }
+        } else {
+#ifdef ASSERT
+          // In theory, we should assert unconditionally. However, for native
+          // thread stacks, NMT explicitly releases the thread stack in Thread's destructor,
+          // due to platform-dependent behavior. On some platforms we see uncommit/release
+          // records for native thread stacks, but on others we don't.
+          if (!cur_vm->is_uncommit_record() && !cur_vm->is_deallocation_record()) {
+            ShouldNotReachHere();
+          }
+#endif
+        }
+      }
+    } else {
+      MemPointerRecord* cur_p = (MemPointerRecord*)cur;
+      MemPointerRecord* p = (MemPointerRecord*)malloc_itr.locate(cur->addr());
+      if (p != NULL && cur_p->addr() == p->addr()) {
+        assert(p->is_allocation_record() || p->is_arena_size_record(), "untracked");
+        if (cur_p->is_allocation_record() || cur_p->is_arena_size_record()) {
+          copy_pointer(p, cur_p);
+        } else {   // deallocation record
+          assert(cur_p->is_deallocation_record(), "wrong record type");
+
+          // when we are removing an arena record, we also need to remove the 'size'
+          // record behind it
+          if (p->is_arena_record()) {
+            MemPointerRecord* next_p = (MemPointerRecord*)malloc_itr.peek_next();
+            if (next_p != NULL && next_p->is_arena_size_record()) {
+              assert(next_p->is_size_record_of_arena(p), "arena records don't match");
+              malloc_itr.remove();
+            }
+          }
+          malloc_itr.remove();
+        }
+      } else {
+        if (cur_p->is_arena_size_record()) {
+          MemPointerRecord* prev_p = (MemPointerRecord*)malloc_itr.peek_prev();
+          if (prev_p != NULL &&
+             (!prev_p->is_arena_record() || !cur_p->is_size_record_of_arena(prev_p))) {
+            // arena already deallocated
+            cur_p = NULL;
+          }
+        }
+        if (cur_p != NULL) {
+          if (cur_p->is_allocation_record() || cur_p->is_arena_size_record()) {
+            if (p != NULL && cur_p->addr() > p->addr()) {
+              malloc_itr.insert_after(cur);
+            } else {
+              malloc_itr.insert(cur);
+            }
+          }
+#ifndef PRODUCT
+          else if (!has_allocation_record(cur_p->addr())){
+            // NMT can not track some startup memory, which is allocated before NMT
+            // is enabled
+            _untracked_count ++;
+          }
+#endif
+        }
+      }
+    }
+
+    cur = walker.next();
+  }
+  NOT_PRODUCT(check_malloc_pointers();)
+  _staging_area->shrink();
+  _staging_area->clear();
+}
+
+
+#ifdef ASSERT
+void MemSnapshot::print_snapshot_stats(outputStream* st) {
+  st->print_cr("Snapshot:");
+  st->print_cr("\tMalloced: %d/%d [%5.2f%%]  %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
+    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);
+
+  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
+    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);
+
+  st->print_cr("\tStaging:     %d/%d [%5.2f%%] %dKB", _staging_area->length(), _staging_area->capacity(),
+    (100.0 * (float)_staging_area->length()) / (float)_staging_area->capacity(), _staging_area->instance_size()/K);
+
+  st->print_cr("\tUntracked allocation: %d", _untracked_count);
+}
+
+void MemSnapshot::check_malloc_pointers() {
+  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
+  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
+  MemPointerRecord* prev = NULL;
+  while (p != NULL) {
+    if (prev != NULL) {
+      assert(p->addr() >= prev->addr(), "sorting order");
+    }
+    prev = p;
+    p = (MemPointerRecord*)mItr.next();
+  }
+}
+
+void MemSnapshot::check_staging_data() {
+  MemPointerArrayIteratorImpl itr(_staging_area);
+  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
+  MemPointerRecord* next = (MemPointerRecord*)itr.next();
+  while (next != NULL) {
+    assert((next->addr() > cur->addr()) ||
+      ((next->flags() & MemPointerRecord::tag_masks) >
+       (cur->flags() & MemPointerRecord::tag_masks)),
+       "sorting order");
+    cur = next;
+    next = (MemPointerRecord*)itr.next();
+  }
+}
+
+bool MemSnapshot::has_allocation_record(address addr) {
+  MemPointerArrayIteratorImpl itr(_staging_area);
+  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
+  while (cur != NULL) {
+    if (cur->addr() == addr && cur->is_allocation_record()) {
+      return true;
+    }
+    cur = (MemPointerRecord*)itr.next();
+  }
+  return false;
+}
+
+#endif
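The ordering that MemSnapshot::merge() maintains in the staging area, and that check_staging_data() asserts above, amounts to "sort by address first, then by tag bits". A minimal comparator sketch of that relation, using a simplified stand-in struct rather than the real MemPointerRecord (the names below are illustrative only, not part of this patch):

    #include <cstdint>

    // Simplified stand-in for a staging record: only the fields that
    // matter for ordering are modeled here.
    struct StagingKey {
      uintptr_t addr;   // block address
      unsigned  tag;    // the MemPointerRecord::tag_masks portion of flags
    };

    // returns true if 'a' should come before 'b' in the staging area
    static bool staging_precedes(const StagingKey& a, const StagingKey& b) {
      if (a.addr != b.addr) return a.addr < b.addr;  // primary key: address
      return a.tag < b.tag;                          // secondary key: tag bits
    }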
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memSnapshot.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
+#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "services/memBaseline.hpp"
+#include "services/memPtrArray.hpp"
+
+
+// Snapshot pointer array iterator
+
+// The pointer array contains malloc-ed pointers
+class MemPointerIterator : public MemPointerArrayIteratorImpl {
+ public:
+  MemPointerIterator(MemPointerArray* arr):
+    MemPointerArrayIteratorImpl(arr) {
+    assert(arr != NULL, "null array");
+  }
+
+#ifdef ASSERT
+  virtual bool is_dup_pointer(const MemPointer* ptr1,
+    const MemPointer* ptr2) const {
+    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
+    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;
+
+    if (p1->addr() != p2->addr()) return false;
+    if ((p1->flags() & MemPointerRecord::tag_masks) !=
+        (p2->flags() & MemPointerRecord::tag_masks)) {
+      return false;
+    }
+    // we do see multiple commit/uncommit on the same memory, it is ok
+    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
+           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
+  }
+
+  virtual bool insert(MemPointer* ptr) {
+    if (_pos > 0) {
+      MemPointer* p1 = (MemPointer*)ptr;
+      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
+      assert(!is_dup_pointer(p1, p2),
+        "dup pointer");
+    }
+    if (_pos < _array->length() - 1) {
+      MemPointer* p1 = (MemPointer*)ptr;
+      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
+      assert(!is_dup_pointer(p1, p2),
+        "dup pointer");
+    }
+    return _array->insert_at(ptr, _pos);
+  }
+
+  virtual bool insert_after(MemPointer* ptr) {
+    if (_pos > 0) {
+      MemPointer* p1 = (MemPointer*)ptr;
+      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
+      assert(!is_dup_pointer(p1, p2),
+        "dup pointer");
+    }
+    if (_pos < _array->length() - 1) {
+      MemPointer* p1 = (MemPointer*)ptr;
+      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
+
+      assert(!is_dup_pointer(p1, p2),
+        "dup pointer");
+    }
+    if (_array->insert_at(ptr, _pos + 1)) {
+      _pos ++;
+      return true;
+    }
+    return false;
+  }
+#endif
+
+  virtual MemPointer* locate(address addr) {
+    MemPointer* cur = current();
+    while (cur != NULL && cur->addr() < addr) {
+      cur = next();
+    }
+    return cur;
+  }
+};
+
+class VMMemPointerIterator : public MemPointerIterator {
+ public:
+  VMMemPointerIterator(MemPointerArray* arr):
+      MemPointerIterator(arr) {
+  }
+
+  // locate an existing record that contains the specified address, or
+  // the record after which a record with the specified address
+  // should be inserted
+  virtual MemPointer* locate(address addr) {
+    VMMemRegion* cur = (VMMemRegion*)current();
+    VMMemRegion* next_p;
+
+    while (cur != NULL) {
+      if (cur->base() > addr) {
+        return cur;
+      } else {
+        // find nearest existing range that has base address <= addr
+        next_p = (VMMemRegion*)peek_next();
+        if (next_p != NULL && next_p->base() <= addr) {
+          cur = (VMMemRegion*)next();
+          continue;
+        }
+      }
+
+      if (cur->is_reserve_record() &&
+        cur->base() <= addr &&
+        (cur->base() + cur->size() > addr)) {
+          return cur;
+      } else if (cur->is_commit_record() &&
+        cur->base() <= addr &&
+        (cur->base() + cur->committed_size() > addr)) {
+          return cur;
+      }
+      cur = (VMMemRegion*)next();
+    }
+    return NULL;
+  }
+
+#ifdef ASSERT
+  virtual bool is_dup_pointer(const MemPointer* ptr1,
+    const MemPointer* ptr2) const {
+    VMMemRegion* p1 = (VMMemRegion*)ptr1;
+    VMMemRegion* p2 = (VMMemRegion*)ptr2;
+
+    if (p1->addr() != p2->addr()) return false;
+    if ((p1->flags() & MemPointerRecord::tag_masks) !=
+        (p2->flags() & MemPointerRecord::tag_masks)) {
+      return false;
+    }
+    // we do see multiple commit/uncommit on the same memory, it is ok
+    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
+           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
+  }
+#endif
+};
+
+class StagingWalker : public MemPointerArrayIterator {
+ private:
+  MemPointerArrayIteratorImpl  _itr;
+  bool                         _is_vm_record;
+  bool                         _end_of_array;
+  VMMemRegionEx                _vm_record;
+  MemPointerRecordEx           _malloc_record;
+
+ public:
+  StagingWalker(MemPointerArray* arr): _itr(arr) {
+    _end_of_array = false;
+    next();
+  }
+
+  // return the pointer at current position
+  MemPointer* current() const {
+    if (_end_of_array) {
+      return NULL;
+    }
+    if (is_vm_record()) {
+      return (MemPointer*)&_vm_record;
+    } else {
+      return (MemPointer*)&_malloc_record;
+    }
+  }
+
+  // return the next pointer and advance current position
+  MemPointer* next();
+
+  // type of 'current' record
+  bool is_vm_record() const {
+    return _is_vm_record;
+  }
+
+  // return the next pointer without advancing current position
+  MemPointer* peek_next() const {
+    assert(false, "not supported");
+    return NULL;
+  }
+
+  MemPointer* peek_prev() const {
+    assert(false, "not supported");
+    return NULL;
+  }
+  // remove the pointer at current position
+  void remove() {
+    assert(false, "not supported");
+  }
+
+  // insert the pointer at current position
+  bool insert(MemPointer* ptr) {
+    assert(false, "not supported");
+    return false;
+  }
+
+  bool insert_after(MemPointer* ptr) {
+    assert(false, "not supported");
+    return false;
+  }
+
+ private:
+  // consolidate all records referring to this vm region
+  bool consolidate_vm_records(VMMemRegionEx* vm_rec);
+};
+
+class MemBaseline;
+
+class MemSnapshot : public CHeapObj<mtNMT> {
+ private:
+  // the following two arrays contain records of all known live memory blocks
+  // live malloc-ed memory pointers
+  MemPointerArray*      _alloc_ptrs;
+  // live virtual memory pointers
+  MemPointerArray*      _vm_ptrs;
+
+  // staging area for a generation's data, before
+  // it can be promoted to the snapshot
+  MemPointerArray*      _staging_area;
+
+  // the lock to protect this snapshot
+  Monitor*              _lock;
+
+  NOT_PRODUCT(size_t    _untracked_count;)
+  friend class MemBaseline;
+
+ public:
+  MemSnapshot();
+  virtual ~MemSnapshot();
+
+  // if we are running out of native memory
+  bool out_of_memory() const {
+    return (_alloc_ptrs == NULL || _staging_area == NULL ||
+      _vm_ptrs == NULL || _lock == NULL ||
+      _alloc_ptrs->out_of_memory() ||
+      _staging_area->out_of_memory() ||
+      _vm_ptrs->out_of_memory());
+  }
+
+  // merge a per-thread memory recorder into staging area
+  bool merge(MemRecorder* rec);
+  // promote staged data to snapshot
+  void promote();
+
+
+  void wait(long timeout) {
+    assert(_lock != NULL, "Just check");
+    MonitorLockerEx locker(_lock);
+    locker.wait(true, timeout);
+  }
+
+  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
+  NOT_PRODUCT(void check_staging_data();)
+  NOT_PRODUCT(void check_malloc_pointers();)
+  NOT_PRODUCT(bool has_allocation_record(address addr);)
+
+ private:
+   // copy pointer data from src to dest
+   void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
+};
+
+
+#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
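MemSnapshot is intended to be driven as a two-stage pipeline: per-thread recorders are merged into the staging area, and the staged generation is then promoted into the malloc and virtual-memory arrays. A minimal driver sketch under that assumption (the real driver is MemTrackWorker::run() in memTrackWorker.cpp below; 'drive_snapshot' and its recorder list handling are illustrative only):

    // Sketch only: error handling and recorder recycling are omitted;
    // the real code releases each recorder back to MemTracker's pool.
    static void drive_snapshot(MemSnapshot* snapshot, MemRecorder* recorders) {
      while (recorders != NULL) {
        MemRecorder* rec = recorders;
        recorders = rec->next();
        if (!snapshot->merge(rec)) {   // fold one recorder into the staging area
          break;                       // staging area ran out of memory
        }
      }
      snapshot->promote();             // promote the staged generation into the snapshot
    }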
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memTrackWorker.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/threadCritical.hpp"
+#include "services/memTracker.hpp"
+#include "services/memTrackWorker.hpp"
+#include "utilities/decoder.hpp"
+#include "utilities/vmError.hpp"
+
+MemTrackWorker::MemTrackWorker() {
+  // the created thread uses the cgc thread type for now. We should revisit
+  // this option, or create a new thread type.
+  _has_error = !os::create_thread(this, os::cgc_thread);
+  set_name("MemTrackWorker", 0);
+
+  // initialize the generation circular buffer
+  if (!has_error()) {
+    _head = _tail = 0;
+    for(int index = 0; index < MAX_GENERATIONS; index ++) {
+      _gen[index] = NULL;
+    }
+  }
+  NOT_PRODUCT(_sync_point_count = 0;)
+  NOT_PRODUCT(_merge_count = 0;)
+  NOT_PRODUCT(_last_gen_in_use = 0;)
+}
+
+MemTrackWorker::~MemTrackWorker() {
+  for (int index = 0; index < MAX_GENERATIONS; index ++) {
+    MemRecorder* rc = _gen[index];
+    if (rc != NULL) {
+      delete rc;
+    }
+  }
+}
+
+void* MemTrackWorker::operator new(size_t size) {
+  assert(false, "use nothrow version");
+  return NULL;
+}
+
+void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) {
+  return allocate(size, false, mtNMT);
+}
+
+void MemTrackWorker::start() {
+  os::start_thread(this);
+}
+
+/*
+ * Native memory tracking worker thread loop:
+ *   1. merge one generation of memory recorders into the staging area
+ *   2. promote staged data to the memory snapshot
+ *
+ * This thread can run through safepoints.
+ */
+
+void MemTrackWorker::run() {
+  assert(MemTracker::is_on(), "native memory tracking is off");
+  this->initialize_thread_local_storage();
+  this->record_stack_base_and_size();
+  MemSnapshot* snapshot = MemTracker::get_snapshot();
+  assert(snapshot != NULL, "Worker should not be started");
+  MemRecorder* rec;
+
+  while (!MemTracker::shutdown_in_progress()) {
+    NOT_PRODUCT(_last_gen_in_use = generations_in_use();)
+    {
+      // take a recorder from earliest generation in buffer
+      ThreadCritical tc;
+      rec = _gen[_head];
+      if (rec != NULL) {
+        _gen[_head] = rec->next();
+      }
+      assert(count_recorder(_gen[_head]) <= MemRecorder::_instance_count,
+        "infinite loop after dequeue");
+    }
+    if (rec != NULL) {
+      // merge the recorder into staging area
+      bool result = snapshot->merge(rec);
+      assert(result, "merge failed");
+      debug_only(_merge_count ++;)
+      MemTracker::release_thread_recorder(rec);
+    } else {
+      // no more recorders to merge, promote staging area
+      // to snapshot
+      if (_head != _tail) {
+        {
+          ThreadCritical tc;
+          if (_gen[_head] != NULL || _head == _tail) {
+            continue;
+          }
+          // done with this generation, increment _head pointer
+          _head = (_head + 1) % MAX_GENERATIONS;
+        }
+        // promote this generation data to snapshot
+        snapshot->promote();
+      } else {
+        snapshot->wait(1000);
+        ThreadCritical tc;
+        // check if more data arrived
+        if (_gen[_head] == NULL) {
+          _gen[_head] = MemTracker::get_pending_recorders();
+        }
+      }
+    }
+  }
+  assert(MemTracker::shutdown_in_progress(), "just check");
+
+  // transition to final shutdown
+  MemTracker::final_shutdown();
+}
+
+// called at the synchronization point, where 'safepoint visible' Java threads are blocked
+// at a safepoint, and the rest of the threads are blocked on the ThreadCritical lock.
+// The caller, MemTracker::sync(), already holds ThreadCritical before calling this
+// method.
+//
+// The following tasks are performed:
+//   1. add all recorders in the pending queue to the current generation
+//   2. advance the generation
+
+void MemTrackWorker::at_sync_point(MemRecorder* rec) {
+  NOT_PRODUCT(_sync_point_count ++;)
+  assert(count_recorder(rec) <= MemRecorder::_instance_count,
+    "pending queue has infinite loop");
+
+  bool out_of_generation_buffer = false;
+  // check shutdown state inside ThreadCritical
+  if (MemTracker::shutdown_in_progress()) return;
+  // append the recorders to the end of the generation
+  if (rec != NULL) {
+    MemRecorder* cur_head = _gen[_tail];
+    if (cur_head == NULL) {
+      _gen[_tail] = rec;
+    } else {
+      while (cur_head->next() != NULL) {
+        cur_head = cur_head->next();
+      }
+      cur_head->set_next(rec);
+    }
+  }
+  assert(count_recorder(rec) <= MemRecorder::_instance_count,
+    "infinite loop after adding to current generation");
+  // we have collected all recorders for this generation. If there is data,
+  // we need to increment _tail to start a new generation.
+  if (_gen[_tail] != NULL || _head == _tail) {
+    _tail = (_tail + 1) % MAX_GENERATIONS;
+    out_of_generation_buffer = (_tail == _head);
+  }
+
+  if (out_of_generation_buffer) {
+    MemTracker::shutdown(MemTracker::NMT_out_of_generation);
+  }
+}
+
+#ifndef PRODUCT
+int MemTrackWorker::count_recorder(const MemRecorder* head) {
+  int count = 0;
+  while(head != NULL) {
+    count ++;
+    head = head->next();
+  }
+  return count;
+}
+
+int MemTrackWorker::count_pending_recorders() const {
+  int count = 0;
+  for (int index = 0; index < MAX_GENERATIONS; index ++) {
+    MemRecorder* head = _gen[index];
+    if (head != NULL) {
+      count += count_recorder(head);
+    }
+  }
+  return count;
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memTrackWorker.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
+#define SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/thread.hpp"
+#include "services/memRecorder.hpp"
+
+// At most MAX_GENERATIONS generations of data can be tracked.
+#define MAX_GENERATIONS  512
+
+
+class MemTrackWorker : public NamedThread {
+ private:
+  // circular buffer. This buffer contains recorders to be merged into the global
+  // snapshot.
+  // Each slot holds a linked list of memory recorders, that contains one
+  // generation of memory data.
+  MemRecorder*  _gen[MAX_GENERATIONS];
+  int           _head, _tail; // head and tail pointers to above circular buffer
+
+  bool          _has_error;
+
+ public:
+  MemTrackWorker();
+  ~MemTrackWorker();
+  _NOINLINE_ void* operator new(size_t size);
+  _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant);
+
+  void start();
+  void run();
+
+  inline bool has_error() const { return _has_error; }
+
+  // task at synchronization point
+  void at_sync_point(MemRecorder* pending_recorders);
+
+  // for debugging purposes; these are not thread safe.
+  NOT_PRODUCT(static int count_recorder(const MemRecorder* head);)
+  NOT_PRODUCT(int count_pending_recorders() const;)
+
+  NOT_PRODUCT(int _sync_point_count;)
+  NOT_PRODUCT(int _merge_count;)
+  NOT_PRODUCT(int _last_gen_in_use;)
+
+  inline int generations_in_use() const {
+    return (_tail <= _head ? (_head - _tail + 1) : (MAX_GENERATIONS - (_tail - _head) + 1));
+  }
+};
+
+#endif // SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
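Read mechanically, generations_in_use() above never returns less than 1: with _head equal to _tail it yields 1, and the wrapped branch also adds 1. A few sample values, plugged straight into the formula with MAX_GENERATIONS = 512 (purely illustrative arithmetic, no claims beyond the formula itself):

    //   _head == _tail            -> _head - _tail + 1    = 1
    //   _head = 7,  _tail = 5     -> 7 - 5 + 1            = 3
    //   _head = 1,  _tail = 510   -> 512 - (510 - 1) + 1  = 4   (wrapped case)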
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memTracker.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,617 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "runtime/atomic.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/threadCritical.hpp"
+#include "services/memPtr.hpp"
+#include "services/memReporter.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/decoder.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+bool NMT_track_callsite = false;
+
+// walk all 'known' threads at NMT sync point, and collect their recorders
+void SyncThreadRecorderClosure::do_thread(Thread* thread) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
+  if (thread->is_Java_thread()) {
+    JavaThread* javaThread = (JavaThread*)thread;
+    MemRecorder* recorder = javaThread->get_recorder();
+    if (recorder != NULL) {
+      MemTracker::enqueue_pending_recorder(recorder);
+      javaThread->set_recorder(NULL);
+    }
+  }
+  _thread_count ++;
+}
+
+
+MemRecorder*                    MemTracker::_global_recorder = NULL;
+MemSnapshot*                    MemTracker::_snapshot = NULL;
+MemBaseline                     MemTracker::_baseline;
+Mutex                           MemTracker::_query_lock(Monitor::native, "NMT_queryLock");
+volatile MemRecorder*           MemTracker::_merge_pending_queue = NULL;
+volatile MemRecorder*           MemTracker::_pooled_recorders = NULL;
+MemTrackWorker*                 MemTracker::_worker_thread = NULL;
+int                             MemTracker::_sync_point_skip_count = 0;
+MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
+volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
+MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
+int                             MemTracker::_thread_count = 255;
+volatile jint                   MemTracker::_pooled_recorder_count = 0;
+debug_only(intx                 MemTracker::_main_thread_tid = 0;)
+debug_only(volatile jint        MemTracker::_pending_recorder_count = 0;)
+
+void MemTracker::init_tracking_options(const char* option_line) {
+  _tracking_level = NMT_off;
+  if (strncmp(option_line, "=summary", 8) == 0) {
+    _tracking_level = NMT_summary;
+  } else if (strncmp(option_line, "=detail", 8) == 0) {
+    _tracking_level = NMT_detail;
+  }
+}
+
+// first phase of bootstrapping, when VM is still in single-threaded mode.
+void MemTracker::bootstrap_single_thread() {
+  if (_tracking_level > NMT_off) {
+    assert(_state == NMT_uninited, "wrong state");
+
+    // NMT is not supported when UseMallocOnly is on. NMT can NOT
+    // handle the amount of malloc data without significantly impacting
+    // runtime performance when this flag is on.
+    if (UseMallocOnly) {
+      shutdown(NMT_use_malloc_only);
+      return;
+    }
+
+    debug_only(_main_thread_tid = os::current_thread_id();)
+    _state = NMT_bootstrapping_single_thread;
+    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
+  }
+}
+
+// second phase of bootstrapping, when VM is about to enter or has already entered multi-threaded mode.
+void MemTracker::bootstrap_multi_thread() {
+  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
+  // create nmt lock for multi-thread execution
+    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
+    _state = NMT_bootstrapping_multi_thread;
+    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
+  }
+}
+
+// fully start nmt
+void MemTracker::start() {
+  // Native memory tracking is off from command line option
+  if (_tracking_level == NMT_off || shutdown_in_progress()) return;
+
+  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
+  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");
+
+  _snapshot = new (std::nothrow)MemSnapshot();
+  if (_snapshot != NULL && !_snapshot->out_of_memory()) {
+    if (start_worker()) {
+      _state = NMT_started;
+      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
+      return;
+    }
+  }
+
+  // fail to start native memory tracking, shut it down
+  shutdown(NMT_initialization);
+}
+
+/**
+ * Shut down native memory tracking.
+ * We can not shut down native memory tracking immediately, so we just
+ * set the shutdown-pending flag; every native memory tracking component
+ * should then shut itself down in an orderly fashion.
+ *
+ * The shutdown sequence:
+ *  1. MemTracker::shutdown() sets MemTracker to the shutdown-pending state
+ *  2. The worker thread calls MemTracker::final_shutdown(), which transitions
+ *     MemTracker to the final-shutdown state.
+ *  3. At the sync point, MemTracker does the final cleanup, then sets the memory
+ *     tracking level to off to complete the shutdown.
+ */
+void MemTracker::shutdown(ShutdownReason reason) {
+  if (_tracking_level == NMT_off) return;
+
+  if (_state <= NMT_bootstrapping_single_thread) {
+    // we are still in single-thread mode, so there is no contention
+    _state = NMT_shutdown_pending;
+    _reason = reason;
+  } else {
+    // we want to know who initiated the shutdown
+    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
+                                       (jint*)&_state, (jint)NMT_started)) {
+        _reason = reason;
+    }
+  }
+}
+
+// final phase of shutdown
+void MemTracker::final_shutdown() {
+  // delete all pending recorders and pooled recorders
+  delete_all_pending_recorders();
+  delete_all_pooled_recorders();
+
+  {
+    // shared baseline and snapshot are the only objects needed to
+    // create query results
+    MutexLockerEx locker(&_query_lock, true);
+    // cleanup baseline data and snapshot
+    _baseline.clear();
+    delete _snapshot;
+    _snapshot = NULL;
+  }
+
+  // shut down the shared decoder instance, since it is only
+  // used by native memory tracking so far.
+  Decoder::shutdown();
+
+  MemTrackWorker* worker = NULL;
+  {
+    ThreadCritical tc;
+    // can not delete worker inside the thread critical
+    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
+      worker = _worker_thread;
+      _worker_thread = NULL;
+    }
+  }
+  if (worker != NULL) {
+    delete worker;
+  }
+  _state = NMT_final_shutdown;
+}
+
+// delete all pooled recorders
+void MemTracker::delete_all_pooled_recorders() {
+  // free all pooled recorders
+  volatile MemRecorder* cur_head = _pooled_recorders;
+  if (cur_head != NULL) {
+    MemRecorder* null_ptr = NULL;
+    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
+      (void*)&_pooled_recorders, (void*)cur_head)) {
+      cur_head = _pooled_recorders;
+    }
+    if (cur_head != NULL) {
+      delete cur_head;
+      _pooled_recorder_count = 0;
+    }
+  }
+}
+
+// delete all recorders in pending queue
+void MemTracker::delete_all_pending_recorders() {
+  // free all pending recorders
+  MemRecorder* pending_head = get_pending_recorders();
+  if (pending_head != NULL) {
+    delete pending_head;
+  }
+}
+
+/*
+ * retrieve the per-thread recorder of the specified thread.
+ * if thread == NULL, it means the global recorder
+ */
+MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
+  if (shutdown_in_progress()) return NULL;
+
+  MemRecorder* rc;
+  if (thread == NULL) {
+    rc = _global_recorder;
+  } else {
+    rc = thread->get_recorder();
+  }
+
+  if (rc != NULL && rc->is_full()) {
+    enqueue_pending_recorder(rc);
+    rc = NULL;
+  }
+
+  if (rc == NULL) {
+    rc = get_new_or_pooled_instance();
+    if (thread == NULL) {
+      _global_recorder = rc;
+    } else {
+      thread->set_recorder(rc);
+    }
+  }
+  return rc;
+}
+
+/*
+ * get a per-thread recorder from the pool, or create a new one if
+ * none is available.
+ */
+MemRecorder* MemTracker::get_new_or_pooled_instance() {
+   MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
+   if (cur_head == NULL) {
+     MemRecorder* rec = new (std::nothrow)MemRecorder();
+     if (rec == NULL || rec->out_of_memory()) {
+       shutdown(NMT_out_of_memory);
+       if (rec != NULL) {
+         delete rec;
+         rec = NULL;
+       }
+     }
+     return rec;
+   } else {
+     MemRecorder* next_head = cur_head->next();
+     if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
+       (void*)cur_head)) {
+       return get_new_or_pooled_instance();
+     }
+     cur_head->set_next(NULL);
+     Atomic::dec(&_pooled_recorder_count);
+     debug_only(cur_head->set_generation();)
+     return cur_head;
+  }
+}
+
+/*
+ * retrieve all recorders in pending queue, and empty the queue
+ */
+MemRecorder* MemTracker::get_pending_recorders() {
+  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
+  MemRecorder* null_ptr = NULL;
+  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
+    (void*)cur_head)) {
+    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
+  }
+  debug_only(Atomic::store(0, &_pending_recorder_count));
+  return cur_head;
+}
+
+/*
+ * release a recorder to recorder pool.
+ */
+void MemTracker::release_thread_recorder(MemRecorder* rec) {
+  assert(rec != NULL, "null recorder");
+  // we don't want to pool too many recorders
+  rec->set_next(NULL);
+  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
+    delete rec;
+    return;
+  }
+
+  rec->clear();
+  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
+  rec->set_next(cur_head);
+  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
+    (void*)cur_head)) {
+    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
+    rec->set_next(cur_head);
+  }
+  Atomic::inc(&_pooled_recorder_count);
+}
+
+/*
+ * This is the most important method in the whole NMT implementation.
+ *
+ * Create a memory record.
+ * 1. When NMT is in single-threaded bootstrapping mode, no lock is needed, as the VM
+ *    is still in single-thread mode.
+ * 2. For all threads other than JavaThreads, ThreadCritical is needed
+ *    to write records to the global recorder.
+ * 3. JavaThreads that are no longer safepoint visible also need to take
+ *    ThreadCritical, and their records are written to the global
+ *    recorder, since these threads are NOT walked by Threads::threads_do().
+ * 4. JavaThreads that are running in native state have to transition
+ *    to VM state before writing to per-thread recorders.
+ * 5. JavaThreads that are running in VM state do not need any lock, and
+ *    records are written to per-thread recorders.
+ * 6. A thread that has yet to attach a VM 'Thread' needs to take
+ *    ThreadCritical to write to the global recorder.
+ *
+ *    Important note:
+ *    NO LOCK should be taken inside ThreadCritical lock !!!
+ */
+void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
+    size_t size, address pc, Thread* thread) {
+  if (!shutdown_in_progress()) {
+    // single thread, we just write records directly to the global recorder,
+    // without any lock
+    if (_state == NMT_bootstrapping_single_thread) {
+      assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
+      thread = NULL;
+    } else {
+      if (thread == NULL) {
+          // don't use Thread::current(), since it is possible that
+          // the calling thread has yet to attach to a VM 'Thread',
+          // which would result in an assertion failure
+          thread = ThreadLocalStorage::thread();
+      }
+    }
+
+    if (thread != NULL) {
+#ifdef ASSERT
+      // cause an assertion on the stack base. This ensures that threads have called
+      // Thread::record_stack_base_and_size(), which creates the
+      // thread native stack records.
+      thread->stack_base();
+#endif
+      // for a JavaThread, if it is running in native state, we need to transition it to
+      // VM state, so it can stop at a safepoint. A JavaThread running in VM state does not
+      // need a lock to write records.
+      if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
+        if (((JavaThread*)thread)->thread_state() == _thread_in_native) {
+          ThreadInVMfromNative trans((JavaThread*)thread);
+          create_record_in_recorder(addr, flags, size, pc, thread);
+        } else {
+          create_record_in_recorder(addr, flags, size, pc, thread);
+        }
+      } else {
+        // other threads, such as worker and watcher threads, need to
+        // take ThreadCritical to write to the global recorder
+        ThreadCritical tc;
+        create_record_in_recorder(addr, flags, size, pc, NULL);
+      }
+    } else {
+      if (_state == NMT_bootstrapping_single_thread) {
+        // single thread, no lock needed
+        create_record_in_recorder(addr, flags, size, pc, NULL);
+      } else {
+        // for a thread that has yet to attach a VM 'Thread', we can not use a VM mutex;
+        // use native ThreadCritical instead
+        ThreadCritical tc;
+        create_record_in_recorder(addr, flags, size, pc, NULL);
+      }
+    }
+  }
+}
+
+// write a record to proper recorder. No lock can be taken from this method
+// down.
+void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags,
+    size_t size, address pc, Thread* thread) {
+    assert(thread == NULL || thread->is_Java_thread(), "wrong thread");
+
+    MemRecorder* rc = get_thread_recorder((JavaThread*)thread);
+    if (rc != NULL) {
+      rc->record(addr, flags, size, pc);
+    }
+}
+
+/**
+ * enqueue a recorder to pending queue
+ */
+void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
+  assert(rec != NULL, "null recorder");
+
+  // we are shutting down, so just delete it
+  if (shutdown_in_progress()) {
+    rec->set_next(NULL);
+    delete rec;
+    return;
+  }
+
+  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
+  rec->set_next(cur_head);
+  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
+    (void*)cur_head)) {
+    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
+    rec->set_next(cur_head);
+  }
+  debug_only(Atomic::inc(&_pending_recorder_count);)
+}
+
+/*
+ * This method is called at a global safepoint,
+ * during its synchronization process.
+ *   1. enqueue all JavaThreads' per-thread recorders
+ *   2. enqueue global recorder
+ *   3. retrieve all pending recorders
+ *   4. reset global sequence number generator
+ *   5. call worker's sync
+ */
+#define MAX_SAFEPOINTS_TO_SKIP     128
+#define SAFE_SEQUENCE_THRESHOLD    30
+#define HIGH_GENERATION_THRESHOLD  60
+
+void MemTracker::sync() {
+  assert(_tracking_level > NMT_off, "NMT is not enabled");
+  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
+
+  // Some GC tests hit a large number of safepoints in a short period of time
+  // without meaningful activity. We should avoid going to the
+  // sync point in these cases, which can potentially exhaust the generation buffer.
+  // Here are the factors that determine whether we should go to the sync point:
+  // 1. do not overflow the sequence number
+  // 2. whether we are in danger of overflowing the generation buffer
+  // 3. how many safepoints we have already skipped
+  if (_state == NMT_started) {
+    // worker thread is not ready, no one can manage generation
+    // buffer, so skip this safepoint
+    if (_worker_thread == NULL) return;
+
+    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
+      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
+      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
+      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
+        _sync_point_skip_count ++;
+        return;
+      }
+    }
+    _sync_point_skip_count = 0;
+    // walk all JavaThreads to collect recorders
+    SyncThreadRecorderClosure stc;
+    Threads::threads_do(&stc);
+
+    _thread_count = stc.get_thread_count();
+    MemRecorder* pending_recorders = get_pending_recorders();
+
+    {
+      // This method runs at a safepoint, holding the ThreadCritical lock;
+      // it should guarantee that NMT is fully sync-ed.
+      ThreadCritical tc;
+      if (_global_recorder != NULL) {
+        _global_recorder->set_next(pending_recorders);
+        pending_recorders = _global_recorder;
+        _global_recorder = NULL;
+      }
+      SequenceGenerator::reset();
+      // check _worker_thread under the lock to avoid a race condition
+      if (_worker_thread != NULL) {
+        _worker_thread->at_sync_point(pending_recorders);
+      }
+    }
+  }
+
+  // now it is time to shut the whole thing off
+  if (_state == NMT_final_shutdown) {
+    _tracking_level = NMT_off;
+
+    // walk all JavaThreads to delete all recorders
+    SyncThreadRecorderClosure stc;
+    Threads::threads_do(&stc);
+    // delete global recorder
+    {
+      ThreadCritical tc;
+      if (_global_recorder != NULL) {
+        delete _global_recorder;
+        _global_recorder = NULL;
+      }
+    }
+
+    _state = NMT_shutdown;
+  }
+}
+
+/*
+ * Start worker thread.
+ */
+bool MemTracker::start_worker() {
+  assert(_worker_thread == NULL, "Just Check");
+  _worker_thread = new (std::nothrow) MemTrackWorker();
+  if (_worker_thread == NULL || _worker_thread->has_error()) {
+    shutdown(NMT_initialization);
+    return false;
+  }
+  _worker_thread->start();
+  return true;
+}
+
+/*
+ * We need to collect a JavaThread's per-thread recorder
+ * before it exits.
+ */
+void MemTracker::thread_exiting(JavaThread* thread) {
+  if (is_on()) {
+    MemRecorder* rec = thread->get_recorder();
+    if (rec != NULL) {
+      enqueue_pending_recorder(rec);
+      thread->set_recorder(NULL);
+    }
+  }
+}
+
+// baseline current memory snapshot
+bool MemTracker::baseline() {
+  MutexLockerEx lock(&_query_lock, true);
+  MemSnapshot* snapshot = get_snapshot();
+  if (snapshot != NULL) {
+    return _baseline.baseline(*snapshot, false);
+  }
+  return false;
+}
+
+// print memory usage from current snapshot
+bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
+  MemBaseline  baseline;
+  MutexLockerEx lock(&_query_lock, true);
+  MemSnapshot* snapshot = get_snapshot();
+  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
+    BaselineReporter reporter(out, unit);
+    reporter.report_baseline(baseline, summary_only);
+    return true;
+  }
+  return false;
+}
+
+// compare memory usage between current snapshot and baseline
+bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
+  MutexLockerEx lock(&_query_lock, true);
+  if (_baseline.baselined()) {
+    MemBaseline baseline;
+    MemSnapshot* snapshot = get_snapshot();
+    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
+      BaselineReporter reporter(out, unit);
+      reporter.diff_baselines(baseline, _baseline, summary_only);
+      return true;
+    }
+  }
+  return false;
+}
+
+#ifndef PRODUCT
+void MemTracker::walk_stack(int toSkip, char* buf, int len) {
+  int cur_len = 0;
+  char tmp[1024];
+  address pc;
+
+  while (cur_len < len) {
+    pc = os::get_caller_pc(toSkip + 1);
+    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
+      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
+      cur_len = (int)strlen(buf);
+    } else {
+      buf[cur_len] = '\0';
+      break;
+    }
+    toSkip ++;
+  }
+}
+
+void MemTracker::print_tracker_stats(outputStream* st) {
+  st->print_cr("\nMemory Tracker Stats:");
+  st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
+  st->print_cr("\tthead count = %d", _thread_count);
+  st->print_cr("\tArena instance = %d", Arena::_instance_count);
+  st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
+  st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
+  st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
+  if (_worker_thread != NULL) {
+    st->print_cr("\tWorker thread:");
+    st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
+    st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
+    st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
+  } else {
+    st->print_cr("\tWorker thread is not started");
+  }
+  st->print_cr(" ");
+
+  if (_snapshot != NULL) {
+    _snapshot->print_snapshot_stats(st);
+  } else {
+    st->print_cr("No snapshot");
+  }
+}
+#endif
+
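The skip heuristic in sync() above works in whole percentages; a worked reading of the thresholds defined in this file (MAX_SAFEPOINTS_TO_SKIP = 128, SAFE_SEQUENCE_THRESHOLD = 30, HIGH_GENERATION_THRESHOLD = 60, with MAX_GENERATIONS = 512 from memTrackWorker.hpp):

    // per_gen_in_use >= 60  means  generations_in_use() * 100 / 512 >= 60,
    //                       i.e. roughly 308 or more generations in use;
    // per_seq_in_use < 30   means  the sequence counter is still under 30% of max_jint.
    // Only when both hold, and fewer than 128 consecutive safepoints have been
    // skipped, does sync() return early without collecting recorders.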
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/memTracker.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
+#define SHARE_VM_SERVICES_MEM_TRACKER_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.hpp"
+#include "services/memPtr.hpp"
+#include "services/memRecorder.hpp"
+#include "services/memSnapshot.hpp"
+#include "services/memTrackWorker.hpp"
+
+#ifdef SOLARIS
+#include "thread_solaris.inline.hpp"
+#endif
+
+#ifdef _DEBUG_
+  #define DEBUG_CALLER_PC  os::get_caller_pc(3)
+#else
+  #define DEBUG_CALLER_PC  0
+#endif
+
+// The thread closure walks threads to collect per-thread
+// memory recorders at NMT sync point
+class SyncThreadRecorderClosure : public ThreadClosure {
+ private:
+  int _thread_count;
+
+ public:
+  SyncThreadRecorderClosure() {
+    _thread_count = 0;
+  }
+
+  void do_thread(Thread* thread);
+  int  get_thread_count() const {
+    return _thread_count;
+  }
+};
+
+class BaselineOutputer;
+class MemSnapshot;
+class MemTrackWorker;
+class Thread;
+/*
+ * MemTracker is the 'gate' class to native memory tracking runtime.
+ */
+class MemTracker : AllStatic {
+  friend class MemTrackWorker;
+  friend class MemSnapshot;
+  friend class SyncThreadRecorderClosure;
+
+  // NMT state
+  enum NMTStates {
+    NMT_uninited,                        // not yet initialized
+    NMT_bootstrapping_single_thread,     // bootstrapping, VM is in single thread mode
+    NMT_bootstrapping_multi_thread,      // bootstrapping, VM is about to enter multi-thread mode
+    NMT_started,                         // NMT fully started
+    NMT_shutdown_pending,                // shutdown pending
+    NMT_final_shutdown,                  // in final phase of shutdown
+    NMT_shutdown                         // shutdown
+  };
+
+
+  // native memory tracking level
+  enum NMTLevel {
+    NMT_off,              // native memory tracking is off
+    NMT_summary,          // don't track callsite
+    NMT_detail            // track callsite also
+  };
+
+ public:
+   enum ShutdownReason {
+     NMT_shutdown_none,     // no shutdown requested
+     NMT_shutdown_user,     // user requested shutdown
+     NMT_normal,            // normal shutdown, process exit
+     NMT_out_of_memory,     // shutdown due to out of memory
+     NMT_initialization,    // shutdown due to initialization failure
+     NMT_use_malloc_only,   // can not combine NMT with UseMallocOnly flag
+     NMT_error_reporting,   // shutdown by vmError::report_and_die()
+     NMT_out_of_generation, // running out of generation queue
+     NMT_sequence_overflow  // overflow the sequence number
+   };
+
+ public:
+  // initialize NMT tracking level from command line options, called
+  // from VM command line parsing code
+  static void init_tracking_options(const char* option_line);
+
+  // if NMT is enabled to record memory activities
+  static inline bool is_on() {
+    return (_tracking_level >= NMT_summary &&
+      _state >= NMT_bootstrapping_single_thread);
+  }
+
+  // user readable reason for shutting down NMT
+  static const char* reason() {
+    switch(_reason) {
+      case NMT_shutdown_none:
+        return "Native memory tracking is not enabled";
+      case NMT_shutdown_user:
+        return "Native memory tracking has been shutdown by user";
+      case NMT_normal:
+        return "Native memory tracking has been shutdown due to process exiting";
+      case NMT_initialization:
+        return "Native memory tracking failed to initialize";
+      case NMT_error_reporting:
+        return "Native memory tracking has been shutdown due to error reporting";
+      case NMT_out_of_generation:
+        return "Native memory tracking has been shutdown due to running out of generation buffer";
+      case NMT_sequence_overflow:
+        return "Native memory tracking has been shutdown due to overflow the sequence number";
+      case NMT_use_malloc_only:
+        return "Native memory tracking is not supported when UseMallocOnly is on";
+      default:
+        ShouldNotReachHere();
+        return NULL;
+    }
+  }
+
+  // test if we can walk native stack
+  static bool can_walk_stack() {
+  // native stack is not walkable during bootstrapping on sparc
+#if defined(SPARC)
+    return (_state == NMT_started);
+#else
+    return (_state >= NMT_bootstrapping_single_thread && _state  <= NMT_started);
+#endif
+  }
+
+  // if native memory tracking tracks callsite
+  static inline bool track_callsite() { return _tracking_level == NMT_detail; }
+
+  // shut down native memory tracking capability. Native memory tracking
+  // can be shut down by the VM when it encounters low-memory scenarios.
+  // The memory tracker should shut itself down gracefully, and preserve the
+  // latest memory statistics for post-mortem diagnosis.
+  static void shutdown(ShutdownReason reason);
+
+  // whether a shutdown has been requested
+  static inline bool shutdown_in_progress() {
+    return (_state >= NMT_shutdown_pending);
+  }
+
+  // bootstrap native memory tracking, so it can start to collect raw data
+  // before the worker thread can start
+
+  // the first phase of bootstrapping, when the VM is still in single-threaded mode
+  static void bootstrap_single_thread();
+  // the second phase of bootstrapping, when the VM is about to enter or already in multi-threaded mode
+  static void bootstrap_multi_thread();
+
+
+  // start() has to be called while the VM is still in single-thread mode, but after
+  // command line option parsing is done.
+  static void start();
+
+  // record a 'malloc' call
+  static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
+                            address pc = 0, Thread* thread = NULL) {
+    assert(is_on(), "check by caller");
+    if (NMT_CAN_TRACK(flags)) {
+      create_memory_record(addr, (flags|MemPointerRecord::malloc_tag()), size, pc, thread);
+    }
+  }
+  // record a 'free' call
+  static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
+    if (is_on() && NMT_CAN_TRACK(flags)) {
+      create_memory_record(addr, MemPointerRecord::free_tag(), 0, 0, thread);
+    }
+  }
+  // record a 'realloc' call
+  static inline void record_realloc(address old_addr, address new_addr, size_t size,
+       MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
+    if (is_on()) {
+      record_free(old_addr, flags, thread);
+      record_malloc(new_addr, size, flags, pc, thread);
+    }
+  }
+
+  // record arena size
+  static inline void record_arena_size(address addr, size_t size) {
+    // we add a positive offset to the arena address, so the arena size record
+    // sorts after the arena record
+    if (is_on() && !UseMallocOnly) {
+      create_memory_record((addr + sizeof(void*)), MemPointerRecord::arena_size_tag(), size,
+        0, NULL);
+    }
+  }
+
+  // record a virtual memory 'reserve' call
+  static inline void record_virtual_memory_reserve(address addr, size_t size,
+                            address pc = 0, Thread* thread = NULL) {
+    if (is_on()) {
+      assert(size > 0, "reserve szero size");
+      create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag(),
+                           size, pc, thread);
+    }
+  }
+
+  // record a virtual memory 'commit' call
+  static inline void record_virtual_memory_commit(address addr, size_t size,
+                            address pc = 0, Thread* thread = NULL) {
+    if (is_on()) {
+      create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag(),
+                           size, pc, thread);
+    }
+  }
+
+  // record a virtual memory 'uncommit' call
+  static inline void record_virtual_memory_uncommit(address addr, size_t size,
+                            Thread* thread = NULL) {
+    if (is_on()) {
+      create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag(),
+                           size, 0, thread);
+    }
+  }
+
+  // record a virtual memory 'release' call
+  static inline void record_virtual_memory_release(address addr, size_t size,
+                            Thread* thread = NULL) {
+    if (is_on()) {
+      create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag(),
+                           size, 0, thread);
+    }
+  }
+
+  // record memory type on virtual memory base address
+  static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
+                            Thread* thread = NULL) {
+    if (is_on()) {
+      assert(base > 0, "wrong base address");
+      assert((flags & (~mt_masks)) == 0, "memory type only");
+      create_memory_record(base, (flags | MemPointerRecord::virtual_memory_type_tag()),
+                           0, 0, thread);
+    }
+  }
+
+
+  // create memory baseline of current memory snapshot
+  static bool baseline();
+  // is there a memory baseline
+  static bool has_baseline() {
+    return _baseline.baselined();
+  }
+
+  // print memory usage from current snapshot
+  static bool print_memory_usage(BaselineOutputer& out, size_t unit,
+           bool summary_only = true);
+  // compare memory usage between current snapshot and baseline
+  static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
+           bool summary_only = true);
+
+  // sync is called within global safepoint to synchronize nmt data
+  static void sync();
+
+  // called when a thread is about to exit
+  static void thread_exiting(JavaThread* thread);
+
+  // retrieve global snapshot
+  static MemSnapshot* get_snapshot() {
+    assert(is_on(), "native memory tracking is off");
+    if (shutdown_in_progress()) {
+      return NULL;
+    }
+    return _snapshot;
+  }
+
+  // print tracker stats
+  NOT_PRODUCT(static void print_tracker_stats(outputStream* st);)
+  NOT_PRODUCT(static void walk_stack(int toSkip, char* buf, int len);)
+
+ private:
+  // start native memory tracking worker thread
+  static bool start_worker();
+
+  // called by worker thread to complete shutdown process
+  static void final_shutdown();
+
+ protected:
+  // retrieve the per-thread recorder of the specified thread.
+  // if the recorder is full, it will be enqueued to the overflow
+  // queue, and a new recorder is acquired from the recorder pool or a
+  // new instance is created.
+  // when thread == NULL, it means the global recorder
+  static MemRecorder* get_thread_recorder(JavaThread* thread);
+
+  // per-thread recorder pool
+  static void release_thread_recorder(MemRecorder* rec);
+  static void delete_all_pooled_recorders();
+
+  // pending recorder queue. Recorders are queued on the pending queue
+  // when they overflow or are collected at an NMT sync point.
+  static void enqueue_pending_recorder(MemRecorder* rec);
+  static MemRecorder* get_pending_recorders();
+  static void delete_all_pending_recorders();
+
+ private:
+  // retrieve a pooled memory recorder or create a new one if none
+  // is available
+  static MemRecorder* get_new_or_pooled_instance();
+  static void create_memory_record(address addr, MEMFLAGS type,
+                   size_t size, address pc, Thread* thread);
+  static void create_record_in_recorder(address addr, MEMFLAGS type,
+                   size_t size, address pc, Thread* thread);
+
+ private:
+  // global memory snapshot
+  static MemSnapshot*     _snapshot;
+
+  // a memory baseline of snapshot
+  static MemBaseline      _baseline;
+
+  // query lock
+  static Mutex            _query_lock;
+
+  // a thread can start to allocate memory before it is attached
+  // to a VM 'Thread'; those memory activities are recorded here.
+  // ThreadCritical is required to guard this global recorder.
+  static MemRecorder*     _global_recorder;
+
+  // main thread id
+  debug_only(static intx   _main_thread_tid;)
+
+  // pending recorders to be merged
+  static volatile MemRecorder*      _merge_pending_queue;
+
+  NOT_PRODUCT(static volatile jint   _pending_recorder_count;)
+
+  // pooled memory recorders
+  static volatile MemRecorder*      _pooled_recorders;
+
+  // memory recorder pool management; uses the following
+  // counters to determine whether a released memory recorder
+  // should be pooled
+
+  // latest thread count
+  static int               _thread_count;
+  // pooled recorder count
+  static volatile jint     _pooled_recorder_count;
+
+
+  // worker thread to merge pending recorders into snapshot
+  static MemTrackWorker*  _worker_thread;
+
+  // how many safepoints we have skipped without entering a sync point
+  static int              _sync_point_skip_count;
+
+  // if the tracker is properly initialized
+  static bool             _is_tracker_ready;
+  // tracking level (off, summary and detail)
+  static enum NMTLevel    _tracking_level;
+
+  // current nmt state
+  static volatile enum NMTStates   _state;
+  // the reason for shutting down nmt
+  static enum ShutdownReason       _reason;
+};
+
+#endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP
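For illustration, a minimal sketch of how a platform os:: layer could feed the inline recorders above after a successful reservation or commit; the helper functions below are hypothetical and not part of this changeset, only the MemTracker calls are taken from the header:

  // Hypothetical helpers (illustration only); 'addr' and 'size' are assumed to
  // come from a successful mmap/VirtualAlloc in the platform os:: code.
  static void report_reserved_region(address addr, size_t size) {
    MemTracker::record_virtual_memory_reserve(addr, size);    // reserve record
    MemTracker::record_virtual_memory_type(addr, mtInternal); // tag the region's memory type
  }

  static void report_committed_region(address addr, size_t size) {
    MemTracker::record_virtual_memory_commit(addr, size);     // commit record
  }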
--- a/hotspot/src/share/vm/services/memoryManager.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/memoryManager.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -166,15 +166,15 @@
 
 GCStatInfo::GCStatInfo(int num_pools) {
   // initialize the arrays for memory usage
-  _before_gc_usage_array = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools);
-  _after_gc_usage_array  = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools);
+  _before_gc_usage_array = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools, mtInternal);
+  _after_gc_usage_array  = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools, mtInternal);
   _usage_array_size = num_pools;
   clear();
 }
 
 GCStatInfo::~GCStatInfo() {
-  FREE_C_HEAP_ARRAY(MemoryUsage*, _before_gc_usage_array);
-  FREE_C_HEAP_ARRAY(MemoryUsage*, _after_gc_usage_array);
+  FREE_C_HEAP_ARRAY(MemoryUsage*, _before_gc_usage_array, mtInternal);
+  FREE_C_HEAP_ARRAY(MemoryUsage*, _after_gc_usage_array, mtInternal);
 }
 
 void GCStatInfo::set_gc_usage(int pool_index, MemoryUsage usage, bool before_gc) {
@@ -214,8 +214,8 @@
 
 void GCMemoryManager::initialize_gc_stat_info() {
   assert(MemoryService::num_memory_pools() > 0, "should have one or more memory pools");
-  _last_gc_stat = new(ResourceObj::C_HEAP) GCStatInfo(MemoryService::num_memory_pools());
-  _current_gc_stat = new(ResourceObj::C_HEAP) GCStatInfo(MemoryService::num_memory_pools());
+  _last_gc_stat = new(ResourceObj::C_HEAP, mtGC) GCStatInfo(MemoryService::num_memory_pools());
+  _current_gc_stat = new(ResourceObj::C_HEAP, mtGC) GCStatInfo(MemoryService::num_memory_pools());
   // tracking concurrent collections we need two objects: one to update, and one to
   // hold the publicly available "last (completed) gc" information.
 }
--- a/hotspot/src/share/vm/services/memoryManager.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/memoryManager.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -40,7 +40,7 @@
 class GCMemoryManager;
 class OopClosure;
 
-class MemoryManager : public CHeapObj {
+class MemoryManager : public CHeapObj<mtInternal> {
 private:
   enum {
     max_num_pools = 10
--- a/hotspot/src/share/vm/services/memoryPool.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/memoryPool.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -50,7 +50,7 @@
 class PermGen;
 class ThresholdSupport;
 
-class MemoryPool : public CHeapObj {
+class MemoryPool : public CHeapObj<mtInternal> {
   friend class MemoryManager;
  public:
   enum PoolType {
--- a/hotspot/src/share/vm/services/memoryService.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/memoryService.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -58,9 +58,9 @@
 #endif
 
 GrowableArray<MemoryPool*>* MemoryService::_pools_list =
-  new (ResourceObj::C_HEAP) GrowableArray<MemoryPool*>(init_pools_list_size, true);
+  new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryPool*>(init_pools_list_size, true);
 GrowableArray<MemoryManager*>* MemoryService::_managers_list =
-  new (ResourceObj::C_HEAP) GrowableArray<MemoryManager*>(init_managers_list_size, true);
+  new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryManager*>(init_managers_list_size, true);
 
 GCMemoryManager* MemoryService::_minor_gc_manager = NULL;
 GCMemoryManager* MemoryService::_major_gc_manager = NULL;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/nmtDCmd.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "services/nmtDCmd.hpp"
+#include "services/memReporter.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+NMTDCmd::NMTDCmd(outputStream* output,
+  bool heap): DCmdWithParser(output, heap),
+  _summary("summary", "request runtime to report current memory summary, " \
+           "which includes total reserved and committed memory, along " \
+           "with memory usage summary by each subsytem.",
+           "BOOLEAN", false, "false"),
+  _detail("detail", "request runtime to report memory allocation >= "
+           "1K by each callsite.",
+           "BOOLEAN", false, "false"),
+  _baseline("baseline", "request runtime to baseline current memory usage, " \
+            "so it can be compared against in later time.",
+            "BOOLEAN", false, "false"),
+  _summary_diff("summary.diff", "request runtime to report memory summary " \
+            "comparison against previous baseline.",
+            "BOOLEAN", false, "false"),
+  _detail_diff("detail.diff", "request runtime to report memory detail " \
+            "comparison against previous baseline, which shows the memory " \
+            "allocation activities at different callsites.",
+            "BOOLEAN", false, "false"),
+  _shutdown("shutdown", "request runtime to shutdown itself and free the " \
+            "memory used by runtime.",
+            "BOOLEAN", false, "false"),
+#ifndef PRODUCT
+  _debug("debug", "print tracker statistics. Debug only, not thread safe", \
+            "BOOLEAN", false, "false"),
+#endif
+  _scale("scale", "Memory usage in which scale, KB, MB or GB",
+       "STRING", false, "KB") {
+  _dcmdparser.add_dcmd_option(&_summary);
+  _dcmdparser.add_dcmd_option(&_detail);
+  _dcmdparser.add_dcmd_option(&_baseline);
+  _dcmdparser.add_dcmd_option(&_summary_diff);
+  _dcmdparser.add_dcmd_option(&_detail_diff);
+  _dcmdparser.add_dcmd_option(&_shutdown);
+#ifndef PRODUCT
+  _dcmdparser.add_dcmd_option(&_debug);
+#endif
+  _dcmdparser.add_dcmd_option(&_scale);
+}
+
+void NMTDCmd::execute(TRAPS) {
+  const char* scale_value = _scale.value();
+  size_t scale_unit;
+  if (strcmp(scale_value, "KB") == 0 || strcmp(scale_value, "kb") == 0) {
+    scale_unit = K;
+  } else if (strcmp(scale_value, "MB") == 0 ||
+             strcmp(scale_value, "mb") == 0) {
+    scale_unit = M;
+  } else if (strcmp(scale_value, "GB") == 0 ||
+             strcmp(scale_value, "gb") == 0) {
+    scale_unit = G;
+  } else {
+    output()->print_cr("Incorrect scale value: %s", scale_value);
+    return;
+  }
+
+  int nopt = 0;
+  if(_summary.is_set()) { ++nopt; }
+  if(_detail.is_set()) { ++nopt; }
+  if(_baseline.is_set()) { ++nopt; }
+  if(_summary_diff.is_set()) { ++nopt; }
+  if(_detail_diff.is_set()) { ++nopt; }
+  if(_shutdown.is_set()) { ++nopt; }
+#ifndef PRODUCT
+  if(_debug.is_set()) { ++nopt; }
+#endif
+
+  if(nopt > 1) {
+      output()->print_cr("At most one of the following option can be specified: " \
+        "summary, detail, baseline, summary.diff, detail.diff, shutdown"
+#ifndef PRODUCT
+        " ,debug"
+#endif
+      );
+      return;
+  }
+
+  if(nopt == 0) {
+      _summary.set_value(true);
+  }
+
+#ifndef PRODUCT
+  if (_debug.value()) {
+    output()->print_cr("debug command is NOT thread-safe, may cause crash");
+    MemTracker::print_tracker_stats(output());
+    return;
+  }
+#endif
+
+  // native memory tracking has to be on
+  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
+    // if it is not on, what's the reason?
+    output()->print_cr(MemTracker::reason());
+    return;
+  }
+
+  if (_summary.value()) {
+    BaselineTTYOutputer outputer(output());
+    MemTracker::print_memory_usage(outputer, scale_unit, true);
+  } else if (_detail.value()) {
+    BaselineTTYOutputer outputer(output());
+    MemTracker::print_memory_usage(outputer, scale_unit, false);
+  } else if (_baseline.value()) {
+    if (MemTracker::baseline()) {
+      output()->print_cr("Successfully baselined.");
+    } else {
+      output()->print_cr("Baseline failed.");
+    }
+  } else if (_summary_diff.value()) {
+    if (MemTracker::has_baseline()) {
+      BaselineTTYOutputer outputer(output());
+      MemTracker::compare_memory_usage(outputer, scale_unit, true);
+    } else {
+      output()->print_cr("No baseline to compare, run 'baseline' command first");
+    }
+  } else if (_detail_diff.value()) {
+    if (MemTracker::has_baseline()) {
+      BaselineTTYOutputer outputer(output());
+      MemTracker::compare_memory_usage(outputer, scale_unit, false);
+    } else {
+      output()->print_cr("No baseline to compare to, run 'baseline' command first");
+    }
+  } else if (_shutdown.value()) {
+    MemTracker::shutdown(MemTracker::NMT_shutdown_user);
+    output()->print_cr("Shutdown is in progress, it will take a few moments to " \
+      "completely shutdown");
+  } else {
+    ShouldNotReachHere();
+    output()->print_cr("Unknown command");
+  }
+}
+
+int NMTDCmd::num_arguments() {
+  ResourceMark rm;
+  NMTDCmd* dcmd = new NMTDCmd(NULL, false);
+  if (dcmd != NULL) {
+    DCmdMark mark(dcmd);
+    return dcmd->_dcmdparser.num_arguments();
+  } else {
+    return 0;
+  }
+}
+
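Once the command is registered with the DCmd framework (handled elsewhere in this changeset), it can be driven through jcmd. A usage sketch, assuming a target VM started with native memory tracking enabled:

  jcmd <pid> VM.native_memory summary scale=MB
  jcmd <pid> VM.native_memory baseline
  jcmd <pid> VM.native_memory summary.diff

With no option given, execute() above defaults to the summary report; at most one reporting option may be combined with scale.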
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/nmtDCmd.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_NMT_DCMD_HPP
+#define SHARE_VM_SERVICES_NMT_DCMD_HPP
+
+#include "services/diagnosticArgument.hpp"
+#include "services/diagnosticFramework.hpp"
+
+/**
+ * Native memory tracking DCmd implementation
+ */
+class NMTDCmd: public DCmdWithParser {
+ protected:
+  DCmdArgument<bool>  _summary;
+  DCmdArgument<bool>  _detail;
+  DCmdArgument<bool>  _baseline;
+  DCmdArgument<bool>  _summary_diff;
+  DCmdArgument<bool>  _detail_diff;
+  DCmdArgument<bool>  _shutdown;
+#ifndef PRODUCT
+  DCmdArgument<bool>  _debug;
+#endif
+  DCmdArgument<char*> _scale;
+
+ public:
+  NMTDCmd(outputStream* output, bool heap);
+  static const char* name() { return "VM.native_memory"; }
+  static const char* description() {
+    return "Print native memory usage";
+  }
+  static const char* impact() {
+    return "Medium:";
+  }
+  static int num_arguments();
+  virtual void execute(TRAPS);
+};
+
+#endif // SHARE_VM_SERVICES_NMT_DCMD_HPP
--- a/hotspot/src/share/vm/services/threadService.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/threadService.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -437,7 +437,7 @@
     GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
     int length = list->length();
     if (length > 0) {
-      _locked_monitors = new (ResourceObj::C_HEAP) GrowableArray<oop>(length, true);
+      _locked_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(length, true);
       for (int i = 0; i < length; i++) {
         MonitorInfo* monitor = list->at(i);
         assert(monitor->owner(), "This monitor must have an owning object");
@@ -491,11 +491,11 @@
 
 ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
   _thread = t;
-  _frames = new (ResourceObj::C_HEAP) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, true);
+  _frames = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, true);
   _depth = 0;
   _with_locked_monitors = with_locked_monitors;
   if (_with_locked_monitors) {
-    _jni_locked_monitors = new (ResourceObj::C_HEAP) GrowableArray<oop>(INITIAL_ARRAY_SIZE, true);
+    _jni_locked_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(INITIAL_ARRAY_SIZE, true);
   } else {
     _jni_locked_monitors = NULL;
   }
@@ -689,7 +689,7 @@
 
 ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
   _thread = thread;
-  _owned_locks = new (ResourceObj::C_HEAP) GrowableArray<instanceOop>(INITIAL_ARRAY_SIZE, true);
+  _owned_locks = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<instanceOop>(INITIAL_ARRAY_SIZE, true);
   _next = NULL;
 }
 
@@ -803,7 +803,7 @@
 
 DeadlockCycle::DeadlockCycle() {
   _is_deadlock = false;
-  _threads = new (ResourceObj::C_HEAP) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, true);
+  _threads = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, true);
   _next = NULL;
 }
 
--- a/hotspot/src/share/vm/services/threadService.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/services/threadService.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -116,7 +116,7 @@
 };
 
 // Per-thread Statistics for synchronization
-class ThreadStatistics : public CHeapObj {
+class ThreadStatistics : public CHeapObj<mtInternal> {
 private:
   // The following contention statistics are only updated by
   // the thread owning these statistics when contention occurs.
@@ -186,7 +186,7 @@
 };
 
 // Thread snapshot to represent the thread state and statistics
-class ThreadSnapshot : public CHeapObj {
+class ThreadSnapshot : public CHeapObj<mtInternal> {
 private:
   JavaThread* _thread;
   oop         _threadObj;
@@ -244,7 +244,7 @@
   void        oops_do(OopClosure* f);
 };
 
-class ThreadStackTrace : public CHeapObj {
+class ThreadStackTrace : public CHeapObj<mtInternal> {
  private:
   JavaThread*                     _thread;
   int                             _depth;  // number of stack frames added
@@ -275,7 +275,7 @@
 // StackFrameInfo for keeping methodOop and bci during
 // stack walking for later construction of StackTraceElement[]
 // Java instances
-class StackFrameInfo : public CHeapObj {
+class StackFrameInfo : public CHeapObj<mtInternal> {
  private:
   methodOop           _method;
   int                 _bci;
@@ -299,7 +299,7 @@
   void      print_on(outputStream* st) const;
 };
 
-class ThreadConcurrentLocks : public CHeapObj {
+class ThreadConcurrentLocks : public CHeapObj<mtInternal> {
 private:
   GrowableArray<instanceOop>* _owned_locks;
   ThreadConcurrentLocks*      _next;
@@ -356,7 +356,7 @@
   void                 oops_do(OopClosure* f);
 };
 
-class DeadlockCycle : public CHeapObj {
+class DeadlockCycle : public CHeapObj<mtInternal> {
  private:
   bool _is_deadlock;
   GrowableArray<JavaThread*>* _threads;
--- a/hotspot/src/share/vm/utilities/array.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/array.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -49,7 +49,7 @@
 void ResourceArray::sort(size_t esize, ftype f) {
   if (!is_empty()) qsort(_data, length(), esize, f);
 }
-void CHeapArray::sort(size_t esize, ftype f) {
+template <MEMFLAGS F> void CHeapArray<F>::sort(size_t esize, ftype f) {
   if (!is_empty()) qsort(_data, length(), esize, f);
 }
 
@@ -70,14 +70,14 @@
 }
 
 
-void CHeapArray::expand(size_t esize, int i, int& size) {
+template <MEMFLAGS F> void CHeapArray<F>::expand(size_t esize, int i, int& size) {
   // determine new size
   if (size == 0) size = 4; // prevent endless loop
   while (i >= size) size *= 2;
   // allocate and initialize new data section
-  void* data = NEW_C_HEAP_ARRAY(char*, esize * size);
+  void* data = NEW_C_HEAP_ARRAY(char*, esize * size, F);
   memcpy(data, _data, esize * length());
-  FREE_C_HEAP_ARRAY(char*, _data);
+  FREE_C_HEAP_ARRAY(char*, _data, F);
   _data = data;
 }
 
@@ -91,7 +91,7 @@
   memmove(dst, src, cnt);
 }
 
-void CHeapArray::remove_at(size_t esize, int i) {
+template <MEMFLAGS F> void CHeapArray<F>::remove_at(size_t esize, int i) {
   assert(0 <= i && i < length(), "index out of bounds");
   _length--;
   void* dst = (char*)_data + i*esize;
--- a/hotspot/src/share/vm/utilities/array.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/array.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -79,7 +79,7 @@
 };
 
 
-class CHeapArray: public CHeapObj {
+template <MEMFLAGS F>class CHeapArray: public CHeapObj<F> {
  protected:
   int   _length;                                 // the number of array elements
   void* _data;                                   // the array memory
@@ -94,7 +94,7 @@
   CHeapArray(size_t esize, int length) {
     assert(length >= 0, "illegal length");
     _length  = length;
-    _data    = (void*) NEW_C_HEAP_ARRAY(char *, esize * length);
+    _data    = (void*) NEW_C_HEAP_ARRAY(char *, esize * length, F);
   }
 
 #ifdef ASSERT
--- a/hotspot/src/share/vm/utilities/bitMap.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/bitMap.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -65,8 +65,8 @@
   if (in_resource_area) {
     _map = NEW_RESOURCE_ARRAY(bm_word_t, new_size_in_words);
   } else {
-    if (old_map != NULL) FREE_C_HEAP_ARRAY(bm_word_t, _map);
-    _map = NEW_C_HEAP_ARRAY(bm_word_t, new_size_in_words);
+    if (old_map != NULL) FREE_C_HEAP_ARRAY(bm_word_t, _map, mtInternal);
+    _map = NEW_C_HEAP_ARRAY(bm_word_t, new_size_in_words, mtInternal);
   }
   Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map,
                        MIN2(old_size_in_words, new_size_in_words));
@@ -469,7 +469,7 @@
 
 void BitMap::init_pop_count_table() {
   if (_pop_count_table == NULL) {
-    BitMap::idx_t *table = NEW_C_HEAP_ARRAY(idx_t, 256);
+    BitMap::idx_t *table = NEW_C_HEAP_ARRAY(idx_t, 256, mtInternal);
     for (uint i = 0; i < 256; i++) {
       table[i] = num_set_bits(i);
     }
@@ -479,7 +479,7 @@
                                        (intptr_t)  NULL_WORD);
     if (res != NULL_WORD) {
       guarantee( _pop_count_table == (void*) res, "invariant" );
-      FREE_C_HEAP_ARRAY(bm_word_t, table);
+      FREE_C_HEAP_ARRAY(bm_word_t, table, mtInternal);
     }
   }
 }
--- a/hotspot/src/share/vm/utilities/decoder.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/decoder.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -29,7 +29,7 @@
 #include "memory/allocation.hpp"
 #include "runtime/mutex.hpp"
 
-class AbstractDecoder : public CHeapObj {
+class AbstractDecoder : public CHeapObj<mtInternal> {
 public:
   // status code for decoding native C frame
   enum decoder_status {
--- a/hotspot/src/share/vm/utilities/elfFile.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/elfFile.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -47,7 +47,7 @@
   m_status = NullDecoder::no_error;
 
   int len = strlen(filepath) + 1;
-  m_filepath = (const char*)os::malloc(len * sizeof(char));
+  m_filepath = (const char*)os::malloc(len * sizeof(char), mtInternal);
   if (m_filepath != NULL) {
     strcpy((char*)m_filepath, filepath);
     m_file = fopen(filepath, "r");
--- a/hotspot/src/share/vm/utilities/elfFile.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/elfFile.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -82,7 +82,7 @@
 // in "error" state, so there are scenarios, lookup will fail. We want this
 // part of code to be very defensive, and bait out if anything went wrong.
 
-class ElfFile: public CHeapObj {
+class ElfFile: public CHeapObj<mtInternal> {
   friend class ElfDecoder;
  public:
   ElfFile(const char* filepath);
--- a/hotspot/src/share/vm/utilities/elfStringTable.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/elfStringTable.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -42,7 +42,7 @@
 
   // try to load the string table
   long cur_offset = ftell(file);
-  m_table = (char*)os::malloc(sizeof(char) * shdr.sh_size);
+  m_table = (char*)os::malloc(sizeof(char) * shdr.sh_size, mtInternal);
   if (m_table != NULL) {
     // if there is an error, mark the error
     if (fseek(file, shdr.sh_offset, SEEK_SET) ||
--- a/hotspot/src/share/vm/utilities/elfStringTable.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/elfStringTable.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -35,7 +35,7 @@
 // The string table represents a string table section in an elf file.
 // Whenever there is enough memory, it will load whole string table as
 // one blob. Otherwise, it will load string from file when requested.
-class ElfStringTable: CHeapObj {
+class ElfStringTable: CHeapObj<mtInternal> {
   friend class ElfFile;
  public:
   ElfStringTable(FILE* file, Elf_Shdr shdr, int index);
--- a/hotspot/src/share/vm/utilities/elfSymbolTable.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/elfSymbolTable.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -40,7 +40,7 @@
   long cur_offset = ftell(file);
   if (cur_offset != -1) {
     // call malloc so we can back up if memory allocation fails.
-    m_symbols = (Elf_Sym*)os::malloc(shdr.sh_size);
+    m_symbols = (Elf_Sym*)os::malloc(shdr.sh_size, mtInternal);
     if (m_symbols) {
       if (fseek(file, shdr.sh_offset, SEEK_SET) ||
         fread((void*)m_symbols, shdr.sh_size, 1, file) != 1 ||
--- a/hotspot/src/share/vm/utilities/elfSymbolTable.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/elfSymbolTable.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -38,7 +38,7 @@
  * of the elf file into memory. Otherwise, it will walk the section in file
  * to look up the symbol that nearest the given address.
  */
-class ElfSymbolTable: public CHeapObj {
+class ElfSymbolTable: public CHeapObj<mtInternal> {
   friend class ElfFile;
  public:
   ElfSymbolTable(FILE* file, Elf_Shdr shdr);
--- a/hotspot/src/share/vm/utilities/events.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/events.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -46,7 +46,7 @@
 // crash time.  This is a very generic interface that is mainly here
 // for completeness.  Normally the templated EventLogBase would be
 // subclassed to provide different log types.
-class EventLog : public CHeapObj {
+class EventLog : public CHeapObj<mtInternal> {
   friend class Events;
 
  private:
--- a/hotspot/src/share/vm/utilities/exceptions.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/exceptions.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -57,7 +57,7 @@
 // field of the Thread class w/o having access to the Thread's interface (for
 // include hierachy reasons).
 
-class ThreadShadow: public CHeapObj {
+class ThreadShadow: public CHeapObj<mtThread> {
   friend class VMStructs;
 
  protected:
--- a/hotspot/src/share/vm/utilities/growableArray.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/growableArray.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -61,7 +61,7 @@
   if (on_stack()) {
     return (void*)resource_allocate_bytes(byte_size);
   } else if (on_C_heap()) {
-    return (void*)AllocateHeap(byte_size, "GrET in " __FILE__);
+    return (void*)AllocateHeap(byte_size, _memflags);
   } else {
     return _arena->Amalloc(byte_size);
   }
--- a/hotspot/src/share/vm/utilities/growableArray.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/growableArray.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -86,6 +86,9 @@
                         //   0 means default ResourceArea
                         //   1 means on C heap
                         //   otherwise, allocate in _arena
+
+  MEMFLAGS   _memflags;   // memory type if allocation in C heap
+
 #ifdef ASSERT
   int    _nesting;      // resource area nesting at creation
   void   set_nesting();
@@ -102,9 +105,14 @@
 
   // This GA will use the resource stack for storage if c_heap==false,
   // Else it will use the C heap.  Use clear_and_deallocate to avoid leaks.
-  GenericGrowableArray(int initial_size, int initial_len, bool c_heap) {
+  GenericGrowableArray(int initial_size, int initial_len, bool c_heap, MEMFLAGS flags = mtNone) {
     _len = initial_len;
     _max = initial_size;
+    _memflags = flags;
+
+    // memory type has to be specified for C heap allocation
+    assert(!(c_heap && flags == mtNone), "memory type not specified for C heap object");
+
     assert(_len >= 0 && _len <= _max, "initial_len too big");
     _arena = (c_heap ? (Arena*)1 : NULL);
     set_nesting();
@@ -121,6 +129,8 @@
     _max = initial_size;
     assert(_len >= 0 && _len <= _max, "initial_len too big");
     _arena = arena;
+    _memflags = mtNone;
+
     assert(on_arena(), "arena has taken on reserved value 0 or 1");
     // Relax next assert to allow object allocation on resource area,
     // on stack or embedded into an other object.
@@ -152,12 +162,14 @@
     for (int i = 0; i < _max; i++) ::new ((void*)&_data[i]) E();
   }
 
-  GrowableArray(int initial_size, bool C_heap = false) : GenericGrowableArray(initial_size, 0, C_heap) {
+  GrowableArray(int initial_size, bool C_heap = false, MEMFLAGS F = mtInternal)
+    : GenericGrowableArray(initial_size, 0, C_heap, F) {
     _data = (E*)raw_allocate(sizeof(E));
     for (int i = 0; i < _max; i++) ::new ((void*)&_data[i]) E();
   }
 
-  GrowableArray(int initial_size, int initial_len, const E& filler, bool C_heap = false) : GenericGrowableArray(initial_size, initial_len, C_heap) {
+  GrowableArray(int initial_size, int initial_len, const E& filler, bool C_heap = false, MEMFLAGS memflags = mtInternal)
+    : GenericGrowableArray(initial_size, initial_len, C_heap, memflags) {
     _data = (E*)raw_allocate(sizeof(E));
     int i = 0;
     for (; i < _len; i++) ::new ((void*)&_data[i]) E(filler);
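A minimal sketch of the allocation pattern the new MEMFLAGS parameter enables; the element type, capacity and mtInternal tag below are illustrative only:

  // The placement new tags the GrowableArray header; the extra constructor
  // argument tags the element storage allocated from the C heap.
  GrowableArray<int>* samples =
      new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(32, true, mtInternal);
  samples->append(42);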
--- a/hotspot/src/share/vm/utilities/hashtable.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/hashtable.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -33,11 +33,6 @@
 #include "utilities/hashtable.inline.hpp"
 
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL4(hs_private, hashtable__new_entry,
-  void*, unsigned int, void*, void*);
-#endif /* !USDT2 */
-
 // This is a generic hashtable, designed to be used for the symbol
 // and string tables.
 //
@@ -46,8 +41,8 @@
 // %note:
 //  - HashtableEntrys are allocated in blocks to reduce the space overhead.
 
-BasicHashtableEntry* BasicHashtable::new_entry(unsigned int hashValue) {
-  BasicHashtableEntry* entry;
+template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
+  BasicHashtableEntry<F>* entry;
 
   if (_free_list) {
     entry = _free_list;
@@ -58,10 +53,10 @@
       int len = _entry_size * block_size;
       len = 1 << log2_intptr(len); // round down to power of 2
       assert(len >= _entry_size, "");
-      _first_free_entry = NEW_C_HEAP_ARRAY(char, len);
+      _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
       _end_block = _first_free_entry + len;
     }
-    entry = (BasicHashtableEntry*)_first_free_entry;
+    entry = (BasicHashtableEntry<F>*)_first_free_entry;
     _first_free_entry += _entry_size;
   }
 
@@ -71,29 +66,21 @@
 }
 
 
-template <class T> HashtableEntry<T>* Hashtable<T>::new_entry(unsigned int hashValue, T obj) {
-  HashtableEntry<T>* entry;
+template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
+  HashtableEntry<T, F>* entry;
 
-  entry = (HashtableEntry<T>*)BasicHashtable::new_entry(hashValue);
+  entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
   entry->set_literal(obj);
-#ifndef USDT2
-  HS_DTRACE_PROBE4(hs_private, hashtable__new_entry,
-    this, hashValue, obj, entry);
-#else /* USDT2 */
-  HS_PRIVATE_HASHTABLE_NEW_ENTRY(
-    this, hashValue, (uintptr_t) obj, entry);
-#endif /* USDT2 */
   return entry;
 }
 
-
 // Check to see if the hashtable is unbalanced.  The caller set a flag to
 // rehash at the next safepoint.  If this bucket is 60 times greater than the
 // expected average bucket length, it's an unbalanced hashtable.
 // This is somewhat an arbitrary heuristic but if one bucket gets to
 // rehash_count which is currently 100, there's probably something wrong.
 
-bool BasicHashtable::check_rehash_table(int count) {
+template <MEMFLAGS F> bool BasicHashtable<F>::check_rehash_table(int count) {
   assert(table_size() != 0, "underflow");
   if (count > (((double)number_of_entries()/(double)table_size())*rehash_multiple)) {
     // Set a flag for the next safepoint, which should be at some guaranteed
@@ -107,13 +94,13 @@
 // with the existing elements.   This can be used to change the hash code
 // and could in the future change the size of the table.
 
-template <class T> void Hashtable<T>::move_to(Hashtable<T>* new_table) {
-  int saved_entry_count = number_of_entries();
+template <class T, MEMFLAGS F> void Hashtable<T, F>::move_to(Hashtable<T, F>* new_table) {
+  int saved_entry_count = BasicHashtable<F>::number_of_entries();
 
   // Iterate through the table and create a new entry for the new table
   for (int i = 0; i < new_table->table_size(); ++i) {
-    for (HashtableEntry<T>* p = bucket(i); p != NULL; ) {
-      HashtableEntry<T>* next = p->next();
+    for (HashtableEntry<T, F>* p = bucket(i); p != NULL; ) {
+      HashtableEntry<T, F>* next = p->next();
       T string = p->literal();
       // Use alternate hashing algorithm on the symbol in the first table
       unsigned int hashValue = new_hash(string);
@@ -141,16 +128,16 @@
   // for the elements has been used in a new table and is not
   // destroyed.  The memory reuse will benefit resizing the SystemDictionary
   // to avoid a memory allocation spike at safepoint.
-  free_buckets();
+  BasicHashtable<F>::free_buckets();
 }
 
-void BasicHashtable::free_buckets() {
+template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
   if (NULL != _buckets) {
     // Don't delete the buckets in the shared space.  They aren't
     // allocated by os::malloc
     if (!UseSharedSpaces ||
         !FileMapInfo::current_info()->is_in_shared_space(_buckets)) {
-       FREE_C_HEAP_ARRAY(HashtableBucket, _buckets);
+       FREE_C_HEAP_ARRAY(HashtableBucket, _buckets, F);
     }
     _buckets = NULL;
   }
@@ -159,13 +146,13 @@
 
 // Reverse the order of elements in the hash buckets.
 
-void BasicHashtable::reverse() {
+template <MEMFLAGS F> void BasicHashtable<F>::reverse() {
 
   for (int i = 0; i < _table_size; ++i) {
-    BasicHashtableEntry* new_list = NULL;
-    BasicHashtableEntry* p = bucket(i);
+    BasicHashtableEntry<F>* new_list = NULL;
+    BasicHashtableEntry<F>* p = bucket(i);
     while (p != NULL) {
-      BasicHashtableEntry* next = p->next();
+      BasicHashtableEntry<F>* next = p->next();
       p->set_next(new_list);
       new_list = p;
       p = next;
@@ -177,7 +164,7 @@
 
 // Copy the table to the shared space.
 
-void BasicHashtable::copy_table(char** top, char* end) {
+template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end) {
 
   // Dump the hash table entries.
 
@@ -186,13 +173,13 @@
 
   int i;
   for (i = 0; i < _table_size; ++i) {
-    for (BasicHashtableEntry** p = _buckets[i].entry_addr();
+    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
                               *p != NULL;
                                p = (*p)->next_addr()) {
       if (*top + entry_size() > end) {
         report_out_of_shared_space(SharedMiscData);
       }
-      *p = (BasicHashtableEntry*)memcpy(*top, *p, entry_size());
+      *p = (BasicHashtableEntry<F>*)memcpy(*top, *p, entry_size());
       *top += entry_size();
     }
   }
@@ -201,7 +188,7 @@
   // Set the shared bit.
 
   for (i = 0; i < _table_size; ++i) {
-    for (BasicHashtableEntry* p = bucket(i); p != NULL; p = p->next()) {
+    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
       p->set_shared();
     }
   }
@@ -211,15 +198,15 @@
 
 // Reverse the order of elements in the hash buckets.
 
-template <class T> void Hashtable<T>::reverse(void* boundary) {
+template <class T, MEMFLAGS F> void Hashtable<T, F>::reverse(void* boundary) {
 
-  for (int i = 0; i < table_size(); ++i) {
-    HashtableEntry<T>* high_list = NULL;
-    HashtableEntry<T>* low_list = NULL;
-    HashtableEntry<T>* last_low_entry = NULL;
-    HashtableEntry<T>* p = bucket(i);
+  for (int i = 0; i < this->table_size(); ++i) {
+    HashtableEntry<T, F>* high_list = NULL;
+    HashtableEntry<T, F>* low_list = NULL;
+    HashtableEntry<T, F>* last_low_entry = NULL;
+    HashtableEntry<T, F>* p = bucket(i);
     while (p != NULL) {
-      HashtableEntry<T>* next = p->next();
+      HashtableEntry<T, F>* next = p->next();
       if ((void*)p->literal() >= boundary) {
         p->set_next(high_list);
         high_list = p;
@@ -244,8 +231,8 @@
 
 // Dump the hash table buckets.
 
-void BasicHashtable::copy_buckets(char** top, char* end) {
-  intptr_t len = _table_size * sizeof(HashtableBucket);
+template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
+  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
   *(intptr_t*)(*top) = len;
   *top += sizeof(intptr_t);
 
@@ -255,18 +242,18 @@
   if (*top + len > end) {
     report_out_of_shared_space(SharedMiscData);
   }
-  _buckets = (HashtableBucket*)memcpy(*top, _buckets, len);
+  _buckets = (HashtableBucket<F>*)memcpy(*top, _buckets, len);
   *top += len;
 }
 
 
 #ifndef PRODUCT
 
-template <class T> void Hashtable<T>::print() {
+template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
   ResourceMark rm;
 
-  for (int i = 0; i < table_size(); i++) {
-    HashtableEntry<T>* entry = bucket(i);
+  for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
+    HashtableEntry<T, F>* entry = bucket(i);
     while(entry != NULL) {
       tty->print("%d : ", i);
       entry->literal()->print();
@@ -277,10 +264,10 @@
 }
 
 
-void BasicHashtable::verify() {
+template <MEMFLAGS F> void BasicHashtable<F>::verify() {
   int count = 0;
   for (int i = 0; i < table_size(); i++) {
-    for (BasicHashtableEntry* p = bucket(i); p != NULL; p = p->next()) {
+    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
       ++count;
     }
   }
@@ -293,7 +280,7 @@
 
 #ifdef ASSERT
 
-void BasicHashtable::verify_lookup_length(double load) {
+template <MEMFLAGS F> void BasicHashtable<F>::verify_lookup_length(double load) {
   if ((double)_lookup_length / (double)_lookup_count > load * 2.0) {
     warning("Performance bug: SystemDictionary lookup_count=%d "
             "lookup_length=%d average=%lf load=%f",
@@ -303,10 +290,22 @@
 }
 
 #endif
-
 // Explicitly instantiate these types
-template class Hashtable<constantPoolOop>;
-template class Hashtable<Symbol*>;
-template class Hashtable<klassOop>;
-template class Hashtable<oop>;
-
+template class Hashtable<constantPoolOop, mtClass>;
+template class Hashtable<Symbol*, mtSymbol>;
+template class Hashtable<klassOop, mtClass>;
+template class Hashtable<oop, mtClass>;
+#ifdef SOLARIS
+template class Hashtable<oop, mtSymbol>;
+#endif
+template class Hashtable<oopDesc*, mtSymbol>;
+template class Hashtable<Symbol*, mtClass>;
+template class HashtableEntry<Symbol*, mtSymbol>;
+template class HashtableEntry<Symbol*, mtClass>;
+template class HashtableEntry<oop, mtSymbol>;
+template class BasicHashtableEntry<mtSymbol>;
+template class BasicHashtableEntry<mtCode>;
+template class BasicHashtable<mtClass>;
+template class BasicHashtable<mtSymbol>;
+template class BasicHashtable<mtCode>;
+template class BasicHashtable<mtInternal>;
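For orientation, a sketch of what the added MEMFLAGS parameter looks like at a declaration site; ExampleTable and its size are hypothetical, only the Hashtable and HashtableEntry signatures come from this patch:

  // Buckets and entries of this table are accounted under mtSymbol.
  class ExampleTable : public Hashtable<Symbol*, mtSymbol> {
   public:
    ExampleTable(int table_size)
      : Hashtable<Symbol*, mtSymbol>(table_size,
                                     sizeof(HashtableEntry<Symbol*, mtSymbol>)) { }
  };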
--- a/hotspot/src/share/vm/utilities/hashtable.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/hashtable.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -40,7 +40,7 @@
 
 
 
-class BasicHashtableEntry : public CHeapObj {
+template <MEMFLAGS F> class BasicHashtableEntry : public CHeapObj<F> {
   friend class VMStructs;
 private:
   unsigned int         _hash;           // 32-bit hash for item
@@ -52,7 +52,7 @@
   // shared entries will not change.  New entries will always be
   // unshared and since pointers are align, bit 0 will always remain 0
   // with no extra effort.
-  BasicHashtableEntry* _next;
+  BasicHashtableEntry<F>* _next;
 
   // Windows IA64 compiler requires subclasses to be able to access these
 protected:
@@ -69,19 +69,19 @@
   void set_hash(unsigned int hash)      { _hash = hash; }
   unsigned int* hash_addr()             { return &_hash; }
 
-  static BasicHashtableEntry* make_ptr(BasicHashtableEntry* p) {
+  static BasicHashtableEntry<F>* make_ptr(BasicHashtableEntry<F>* p) {
     return (BasicHashtableEntry*)((intptr_t)p & -2);
   }
 
-  BasicHashtableEntry* next() const {
+  BasicHashtableEntry<F>* next() const {
     return make_ptr(_next);
   }
 
-  void set_next(BasicHashtableEntry* next) {
+  void set_next(BasicHashtableEntry<F>* next) {
     _next = next;
   }
 
-  BasicHashtableEntry** next_addr() {
+  BasicHashtableEntry<F>** next_addr() {
     return &_next;
   }
 
@@ -90,13 +90,13 @@
   }
 
   void set_shared() {
-    _next = (BasicHashtableEntry*)((intptr_t)_next | 1);
+    _next = (BasicHashtableEntry<F>*)((intptr_t)_next | 1);
   }
 };
 
 
 
-template <class T> class HashtableEntry : public BasicHashtableEntry {
+template <class T, MEMFLAGS F> class HashtableEntry : public BasicHashtableEntry<F> {
   friend class VMStructs;
 private:
   T               _literal;          // ref to item in table.
@@ -108,20 +108,20 @@
   void set_literal(T s)               { _literal = s; }
 
   HashtableEntry* next() const {
-    return (HashtableEntry*)BasicHashtableEntry::next();
+    return (HashtableEntry*)BasicHashtableEntry<F>::next();
   }
   HashtableEntry** next_addr() {
-    return (HashtableEntry**)BasicHashtableEntry::next_addr();
+    return (HashtableEntry**)BasicHashtableEntry<F>::next_addr();
   }
 };
 
 
 
-class HashtableBucket : public CHeapObj {
+template <MEMFLAGS F> class HashtableBucket : public CHeapObj<F> {
   friend class VMStructs;
 private:
   // Instance variable
-  BasicHashtableEntry*       _entry;
+  BasicHashtableEntry<F>*       _entry;
 
 public:
   // Accessing
@@ -129,21 +129,21 @@
 
   // The following methods use order access methods to avoid race
   // conditions in multiprocessor systems.
-  BasicHashtableEntry* get_entry() const;
-  void set_entry(BasicHashtableEntry* l);
+  BasicHashtableEntry<F>* get_entry() const;
+  void set_entry(BasicHashtableEntry<F>* l);
 
   // The following method is not MT-safe and must be done under lock.
-  BasicHashtableEntry** entry_addr()  { return &_entry; }
+  BasicHashtableEntry<F>** entry_addr()  { return &_entry; }
 };
 
 
-class BasicHashtable : public CHeapObj {
+template <MEMFLAGS F> class BasicHashtable : public CHeapObj<F> {
   friend class VMStructs;
 
 public:
   BasicHashtable(int table_size, int entry_size);
   BasicHashtable(int table_size, int entry_size,
-                 HashtableBucket* buckets, int number_of_entries);
+                 HashtableBucket<F>* buckets, int number_of_entries);
 
   // Sharing support.
   void copy_buckets(char** top, char* end);
@@ -162,8 +162,8 @@
 private:
   // Instance variables
   int               _table_size;
-  HashtableBucket*  _buckets;
-  BasicHashtableEntry* _free_list;
+  HashtableBucket<F>*     _buckets;
+  BasicHashtableEntry<F>* _free_list;
   char*             _first_free_entry;
   char*             _end_block;
   int               _entry_size;
@@ -188,20 +188,20 @@
   int entry_size() const { return _entry_size; }
 
   // The following method is MT-safe and may be used with caution.
-  BasicHashtableEntry* bucket(int i);
+  BasicHashtableEntry<F>* bucket(int i);
 
   // The following method is not MT-safe and must be done under lock.
-  BasicHashtableEntry** bucket_addr(int i) { return _buckets[i].entry_addr(); }
+  BasicHashtableEntry<F>** bucket_addr(int i) { return _buckets[i].entry_addr(); }
 
   // Table entry management
-  BasicHashtableEntry* new_entry(unsigned int hashValue);
+  BasicHashtableEntry<F>* new_entry(unsigned int hashValue);
 
   // Check that the table is unbalanced
   bool check_rehash_table(int count);
 
   // Used when moving the entry to another table
   // Clean up links, but do not add to free_list
-  void unlink_entry(BasicHashtableEntry* entry) {
+  void unlink_entry(BasicHashtableEntry<F>* entry) {
     entry->set_next(NULL);
     --_number_of_entries;
   }
@@ -221,11 +221,11 @@
 
 public:
   int table_size() { return _table_size; }
-  void set_entry(int index, BasicHashtableEntry* entry);
+  void set_entry(int index, BasicHashtableEntry<F>* entry);
 
-  void add_entry(int index, BasicHashtableEntry* entry);
+  void add_entry(int index, BasicHashtableEntry<F>* entry);
 
-  void free_entry(BasicHashtableEntry* entry);
+  void free_entry(BasicHashtableEntry<F>* entry);
 
   int number_of_entries() { return _number_of_entries; }
 
@@ -233,16 +233,16 @@
 };
 
 
-template <class T> class Hashtable : public BasicHashtable {
+template <class T, MEMFLAGS F> class Hashtable : public BasicHashtable<F> {
   friend class VMStructs;
 
 public:
   Hashtable(int table_size, int entry_size)
-    : BasicHashtable(table_size, entry_size) { }
+    : BasicHashtable<F>(table_size, entry_size) { }
 
   Hashtable(int table_size, int entry_size,
-                   HashtableBucket* buckets, int number_of_entries)
-    : BasicHashtable(table_size, entry_size, buckets, number_of_entries) { }
+                   HashtableBucket<F>* buckets, int number_of_entries)
+    : BasicHashtable<F>(table_size, entry_size, buckets, number_of_entries) { }
 
   // Debugging
   void print()               PRODUCT_RETURN;
@@ -264,35 +264,35 @@
   }
 
   // Table entry management
-  HashtableEntry<T>* new_entry(unsigned int hashValue, T obj);
+  HashtableEntry<T, F>* new_entry(unsigned int hashValue, T obj);
 
   // The following method is MT-safe and may be used with caution.
-  HashtableEntry<T>* bucket(int i) {
-    return (HashtableEntry<T>*)BasicHashtable::bucket(i);
+  HashtableEntry<T, F>* bucket(int i) {
+    return (HashtableEntry<T, F>*)BasicHashtable<F>::bucket(i);
   }
 
   // The following method is not MT-safe and must be done under lock.
-  HashtableEntry<T>** bucket_addr(int i) {
-    return (HashtableEntry<T>**)BasicHashtable::bucket_addr(i);
+  HashtableEntry<T, F>** bucket_addr(int i) {
+    return (HashtableEntry<T, F>**)BasicHashtable<F>::bucket_addr(i);
   }
 
   // Function to move these elements into the new table.
-  void move_to(Hashtable<T>* new_table);
+  void move_to(Hashtable<T, F>* new_table);
   virtual unsigned int new_hash(T) { ShouldNotReachHere(); return 0; } // should be overridden
 };
 
 
 //  Verions of hashtable where two handles are used to compute the index.
 
-template <class T> class TwoOopHashtable : public Hashtable<T> {
+template <class T, MEMFLAGS F> class TwoOopHashtable : public Hashtable<T, F> {
   friend class VMStructs;
 protected:
   TwoOopHashtable(int table_size, int entry_size)
-    : Hashtable<T>(table_size, entry_size) {}
+    : Hashtable<T, F>(table_size, entry_size) {}
 
-  TwoOopHashtable(int table_size, int entry_size, HashtableBucket* t,
+  TwoOopHashtable(int table_size, int entry_size, HashtableBucket<F>* t,
                   int number_of_entries)
-    : Hashtable<T>(table_size, entry_size, t, number_of_entries) {}
+    : Hashtable<T, F>(table_size, entry_size, t, number_of_entries) {}
 
 public:
   unsigned int compute_hash(Symbol* name, Handle loader) {
--- a/hotspot/src/share/vm/utilities/hashtable.inline.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/hashtable.inline.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -27,6 +27,7 @@
 
 #include "memory/allocation.inline.hpp"
 #include "utilities/hashtable.hpp"
+#include "utilities/dtrace.hpp"
 
 // Inline function definitions for hashtable.hpp.
 
@@ -34,18 +35,18 @@
 
 // Initialize a table.
 
-inline BasicHashtable::BasicHashtable(int table_size, int entry_size) {
+template <MEMFLAGS F> inline BasicHashtable<F>::BasicHashtable(int table_size, int entry_size) {
   // Called on startup, no locking needed
   initialize(table_size, entry_size, 0);
-  _buckets = NEW_C_HEAP_ARRAY(HashtableBucket, table_size);
+  _buckets = NEW_C_HEAP_ARRAY2(HashtableBucket<F>, table_size, F, CURRENT_PC);
   for (int index = 0; index < _table_size; index++) {
     _buckets[index].clear();
   }
 }
 
 
-inline BasicHashtable::BasicHashtable(int table_size, int entry_size,
-                                      HashtableBucket* buckets,
+template <MEMFLAGS F> inline BasicHashtable<F>::BasicHashtable(int table_size, int entry_size,
+                                      HashtableBucket<F>* buckets,
                                       int number_of_entries) {
   // Called on startup, no locking needed
   initialize(table_size, entry_size, number_of_entries);
@@ -53,7 +54,7 @@
 }
 
 
-inline void BasicHashtable::initialize(int table_size, int entry_size,
+template <MEMFLAGS F> inline void BasicHashtable<F>::initialize(int table_size, int entry_size,
                                        int number_of_entries) {
   // Called on startup, no locking needed
   _table_size = table_size;
@@ -70,12 +71,12 @@
 
 
 // The following method is MT-safe and may be used with caution.
-inline BasicHashtableEntry* BasicHashtable::bucket(int i) {
+template <MEMFLAGS F> inline BasicHashtableEntry<F>* BasicHashtable<F>::bucket(int i) {
   return _buckets[i].get_entry();
 }
 
 
-inline void HashtableBucket::set_entry(BasicHashtableEntry* l) {
+template <MEMFLAGS F> inline void HashtableBucket<F>::set_entry(BasicHashtableEntry<F>* l) {
   // Warning: Preserve store ordering.  The SystemDictionary is read
   //          without locks.  The new SystemDictionaryEntry must be
   //          complete before other threads can be allowed to see it
@@ -84,27 +85,27 @@
 }
 
 
-inline BasicHashtableEntry* HashtableBucket::get_entry() const {
+template <MEMFLAGS F> inline BasicHashtableEntry<F>* HashtableBucket<F>::get_entry() const {
   // Warning: Preserve load ordering.  The SystemDictionary is read
   //          without locks.  The new SystemDictionaryEntry must be
   //          complete before other threads can be allowed to see it
   //          via a store to _buckets[index].
-  return (BasicHashtableEntry*) OrderAccess::load_ptr_acquire(&_entry);
+  return (BasicHashtableEntry<F>*) OrderAccess::load_ptr_acquire(&_entry);
 }
 
 
-inline void BasicHashtable::set_entry(int index, BasicHashtableEntry* entry) {
+template <MEMFLAGS F> inline void BasicHashtable<F>::set_entry(int index, BasicHashtableEntry<F>* entry) {
   _buckets[index].set_entry(entry);
 }
 
 
-inline void BasicHashtable::add_entry(int index, BasicHashtableEntry* entry) {
+template <MEMFLAGS F> inline void BasicHashtable<F>::add_entry(int index, BasicHashtableEntry<F>* entry) {
   entry->set_next(bucket(index));
   _buckets[index].set_entry(entry);
   ++_number_of_entries;
 }
 
-inline void BasicHashtable::free_entry(BasicHashtableEntry* entry) {
+template <MEMFLAGS F> inline void BasicHashtable<F>::free_entry(BasicHashtableEntry<F>* entry) {
   entry->set_next(_free_list);
   _free_list = entry;
   --_number_of_entries;
--- a/hotspot/src/share/vm/utilities/histogram.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/histogram.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -69,7 +69,7 @@
 
 Histogram::Histogram(const char* title,int estimatedCount) {
   _title = title;
-  _elements = new (ResourceObj::C_HEAP) GrowableArray<HistogramElement*>(estimatedCount,true);
+  _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<HistogramElement*>(estimatedCount,true);
 }
 
 void Histogram::add_element(HistogramElement* element) {
--- a/hotspot/src/share/vm/utilities/histogram.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/histogram.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -77,7 +77,7 @@
 
 #ifdef ASSERT
 
-class HistogramElement : public CHeapObj {
+class HistogramElement : public CHeapObj<mtInternal> {
  protected:
   jint _count;
   const char* _name;
@@ -91,7 +91,7 @@
   virtual int compare(HistogramElement* e1,HistogramElement* e2);
 };
 
-class Histogram : public CHeapObj {
+class Histogram : public CHeapObj<mtInternal> {
  protected:
   GrowableArray<HistogramElement*>* _elements;
   GrowableArray<HistogramElement*>* elements() { return _elements; }
--- a/hotspot/src/share/vm/utilities/intHisto.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/intHisto.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -27,7 +27,7 @@
 
 IntHistogram::IntHistogram(int est, int max) : _max(max), _tot(0) {
   assert(0 <= est && est <= max, "Preconditions");
-  _elements = new (ResourceObj::C_HEAP) GrowableArray<int>(est, true);
+  _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(est, true);
   guarantee(_elements != NULL, "alloc failure");
 }
 
--- a/hotspot/src/share/vm/utilities/intHisto.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/intHisto.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -47,7 +47,7 @@
 // relation) to a count.
 
 
-class IntHistogram : public CHeapObj {
+class IntHistogram : public CHeapObj<mtInternal> {
  protected:
   int _max;
   int _tot;
--- a/hotspot/src/share/vm/utilities/numberSeq.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/numberSeq.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -133,13 +133,13 @@
 
 TruncatedSeq::TruncatedSeq(int length, double alpha):
   AbsSeq(alpha), _length(length), _next(0) {
-  _sequence = NEW_C_HEAP_ARRAY(double, _length);
+  _sequence = NEW_C_HEAP_ARRAY(double, _length, mtInternal);
   for (int i = 0; i < _length; ++i)
     _sequence[i] = 0.0;
 }
 
 TruncatedSeq::~TruncatedSeq() {
-  FREE_C_HEAP_ARRAY(double, _sequence);
+  FREE_C_HEAP_ARRAY(double, _sequence, mtInternal);
 }
 
 void TruncatedSeq::add(double val) {
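The NEW_C_HEAP_ARRAY / FREE_C_HEAP_ARRAY pairing in the hunk above is the core pattern of this patch: an allocation and its matching free carry the same memory type so NMT can account them consistently. A sketch with an illustrative buffer name and size:

  char* buf = NEW_C_HEAP_ARRAY(char, 256, mtInternal);  // allocation tagged mtInternal
  // ... use buf ...
  FREE_C_HEAP_ARRAY(char, buf, mtInternal);             // free with the same type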
--- a/hotspot/src/share/vm/utilities/numberSeq.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/numberSeq.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_UTILITIES_NUMBERSEQ_HPP
 #define SHARE_VM_UTILITIES_NUMBERSEQ_HPP
 
+#include "memory/allocation.hpp"
+
 /**
  **  This file contains a few classes that represent number sequence,
  **  x1, x2, x3, ..., xN, and can calculate their avg, max, and sd.
@@ -40,7 +42,7 @@
 
 #define DEFAULT_ALPHA_VALUE 0.7
 
-class AbsSeq {
+class AbsSeq: public CHeapObj<mtInternal> {
 private:
   void init(double alpha);
 
--- a/hotspot/src/share/vm/utilities/ostream.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/ostream.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -384,7 +384,7 @@
   if (_file != NULL) {
     if (_need_close) fclose(_file);
     _file      = NULL;
-    FREE_C_HEAP_ARRAY(char, _file_name);
+    FREE_C_HEAP_ARRAY(char, _file_name, mtInternal);
     _file_name = NULL;
   }
 }
@@ -392,7 +392,7 @@
 rotatingFileStream::rotatingFileStream(const char* file_name) {
   _cur_file_num = 0;
   _bytes_writen = 0L;
-  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10);
+  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal);
   jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
   _file = fopen(_file_name, "w");
   _need_close = true;
@@ -401,7 +401,7 @@
 rotatingFileStream::rotatingFileStream(const char* file_name, const char* opentype) {
   _cur_file_num = 0;
   _bytes_writen = 0L;
-  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10);
+  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal);
   jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
   _file = fopen(_file_name, opentype);
   _need_close = true;
@@ -524,7 +524,7 @@
   }
 
   // Create big enough buffer.
-  char *buf = NEW_C_HEAP_ARRAY(char, buffer_length);
+  char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
 
   strcpy(buf, "");
   if (force_directory != NULL) {
@@ -549,7 +549,7 @@
   // %%% Need a MutexLocker?
   const char* log_name = LogFile != NULL ? LogFile : "hotspot.log";
   const char* try_name = make_log_name(log_name, NULL);
-  fileStream* file = new(ResourceObj::C_HEAP) fileStream(try_name);
+  fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
   if (!file->is_open()) {
     // Try again to open the file.
     char warnbuf[O_BUFLEN*2];
@@ -557,18 +557,18 @@
                  "Warning:  Cannot open log file: %s\n", try_name);
     // Note:  This feature is for maintainer use only.  No need for L10N.
     jio_print(warnbuf);
-    FREE_C_HEAP_ARRAY(char, try_name);
+    FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
     try_name = make_log_name("hs_pid%p.log", os::get_temp_directory());
     jio_snprintf(warnbuf, sizeof(warnbuf),
                  "Warning:  Forcing option -XX:LogFile=%s\n", try_name);
     jio_print(warnbuf);
     delete file;
-    file = new(ResourceObj::C_HEAP) fileStream(try_name);
-    FREE_C_HEAP_ARRAY(char, try_name);
+    file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
+    FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
   }
   if (file->is_open()) {
     _log_file = file;
-    xmlStream* xs = new(ResourceObj::C_HEAP) xmlStream(file);
+    xmlStream* xs = new(ResourceObj::C_HEAP, mtInternal) xmlStream(file);
     _outer_xmlStream = xs;
     if (this == tty)  xtty = xs;
     // Write XML header.
@@ -815,7 +815,7 @@
 
 void ostream_init() {
   if (defaultStream::instance == NULL) {
-    defaultStream::instance = new(ResourceObj::C_HEAP) defaultStream();
+    defaultStream::instance = new(ResourceObj::C_HEAP, mtInternal) defaultStream();
     tty = defaultStream::instance;
 
     // We want to ensure that time stamps in GC logs consider time 0
@@ -833,9 +833,9 @@
   gclog_or_tty = tty; // default to tty
   if (Arguments::gc_log_filename() != NULL) {
     fileStream * gclog  = UseGCLogFileRotation ?
-                          new(ResourceObj::C_HEAP)
+                          new(ResourceObj::C_HEAP, mtInternal)
                              rotatingFileStream(Arguments::gc_log_filename()) :
-                          new(ResourceObj::C_HEAP)
+                          new(ResourceObj::C_HEAP, mtInternal)
                              fileStream(Arguments::gc_log_filename());
     if (gclog->is_open()) {
       // now we update the time stamp of the GC log to be synced up
@@ -940,7 +940,7 @@
 
 bufferedStream::bufferedStream(size_t initial_size, size_t bufmax) : outputStream() {
   buffer_length = initial_size;
-  buffer        = NEW_C_HEAP_ARRAY(char, buffer_length);
+  buffer        = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
   buffer_pos    = 0;
   buffer_fixed  = false;
   buffer_max    = bufmax;
@@ -971,7 +971,7 @@
       if (end < buffer_length * 2) {
         end = buffer_length * 2;
       }
-      buffer = REALLOC_C_HEAP_ARRAY(char, buffer, end);
+      buffer = REALLOC_C_HEAP_ARRAY(char, buffer, end, mtInternal);
       buffer_length = end;
     }
   }
@@ -989,7 +989,7 @@
 
 bufferedStream::~bufferedStream() {
   if (!buffer_fixed) {
-    FREE_C_HEAP_ARRAY(char, buffer);
+    FREE_C_HEAP_ARRAY(char, buffer, mtInternal);
   }
 }
 
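The `new(ResourceObj::C_HEAP, mtInternal)` spelling works because ResourceObj overloads operator new with an allocation-type argument and, after this change, a memory-category argument. A standalone sketch of that overload shape (the names below are stand-ins, not HotSpot's ResourceObj):

#include <cstddef>
#include <cstdlib>

enum MEMFLAGS { mtNone, mtInternal, mtGC };
enum allocation_type { C_HEAP, RESOURCE_AREA };

// Stand-in for ResourceObj: operator new takes an allocation type plus a
// memory category.
class ResourceObjSketch {
 public:
  void* operator new(std::size_t size, allocation_type type, MEMFLAGS flags) {
    (void) type; (void) flags;    // a real tracker records both
    return std::malloc(size);
  }
  void operator delete(void* p) { std::free(p); }
  // Matching placement delete, used only if a constructor throws.
  void operator delete(void* p, allocation_type, MEMFLAGS) { std::free(p); }
};

class FileStreamSketch : public ResourceObjSketch {
  const char* _name;
 public:
  explicit FileStreamSketch(const char* name) : _name(name) {}
};

int main() {
  // Same shape as new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name).
  FileStreamSketch* fs = new (C_HEAP, mtInternal) FileStreamSketch("hotspot.log");
  delete fs;
  return 0;
}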
--- a/hotspot/src/share/vm/utilities/stack.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/stack.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_UTILITIES_STACK_HPP
 #define SHARE_VM_UTILITIES_STACK_HPP
 
+#include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 
 // Class Stack (below) grows and shrinks by linking together "segments" which
@@ -51,11 +52,11 @@
 // implementation in class Stack assumes that alloc() will terminate the process
 // if the allocation fails.
 
-template <class E> class StackIterator;
+template <class E, MEMFLAGS F> class StackIterator;
 
 // StackBase holds common data/methods that don't depend on the element type,
 // factored out to reduce template code duplication.
-class StackBase
+template <MEMFLAGS F> class StackBase
 {
 public:
   size_t segment_size()   const { return _seg_size; } // Elements per segment.
@@ -89,11 +90,11 @@
 #define inline
 #endif // __GNUC__
 
-template <class E>
-class Stack:  public StackBase
+template <class E, MEMFLAGS F>
+class Stack:  public StackBase<F>
 {
 public:
-  friend class StackIterator<E>;
+  friend class StackIterator<E, F>;
 
   // segment_size:    number of items per segment
   // max_cache_size:  maximum number of *segments* to cache
@@ -103,15 +104,15 @@
                size_t max_cache_size = 4, size_t max_size = 0);
   inline ~Stack() { clear(true); }
 
-  inline bool is_empty() const { return _cur_seg == NULL; }
-  inline bool is_full()  const { return _full_seg_size >= max_size(); }
+  inline bool is_empty() const { return this->_cur_seg == NULL; }
+  inline bool is_full()  const { return this->_full_seg_size >= this->max_size(); }
 
   // Performance sensitive code should use is_empty() instead of size() == 0 and
   // is_full() instead of size() == max_size().  Using a conditional here allows
   // just one var to be updated when pushing/popping elements instead of two;
   // _full_seg_size is updated only when pushing/popping segments.
   inline size_t size() const {
-    return is_empty() ? 0 : _full_seg_size + _cur_seg_size;
+    return is_empty() ? 0 : this->_full_seg_size + this->_cur_seg_size;
   }
 
   inline void push(E elem);
@@ -161,18 +162,18 @@
   E* _cache;      // Segment cache to avoid ping-ponging.
 };
 
-template <class E> class ResourceStack:  public Stack<E>, public ResourceObj
+template <class E, MEMFLAGS F> class ResourceStack:  public Stack<E, F>, public ResourceObj
 {
 public:
   // If this class becomes widely used, it may make sense to save the Thread
   // and use it when allocating segments.
-  ResourceStack(size_t segment_size = Stack<E>::default_segment_size()):
-    Stack<E>(segment_size, max_uintx)
+//  ResourceStack(size_t segment_size = Stack<E, F>::default_segment_size()):
+  ResourceStack(size_t segment_size): Stack<E, F>(segment_size, max_uintx)
     { }
 
   // Set the segment pointers to NULL so the parent dtor does not free them;
   // that must be done by the ResourceMark code.
-  ~ResourceStack() { Stack<E>::reset(true); }
+  ~ResourceStack() { Stack<E, F>::reset(true); }
 
 protected:
   virtual E*   alloc(size_t bytes);
@@ -182,13 +183,13 @@
   void clear(bool clear_cache = false);
 };
 
-template <class E>
+template <class E, MEMFLAGS F>
 class StackIterator: public StackObj
 {
 public:
-  StackIterator(Stack<E>& stack): _stack(stack) { sync(); }
+  StackIterator(Stack<E, F>& stack): _stack(stack) { sync(); }
 
-  Stack<E>& stack() const { return _stack; }
+  Stack<E, F>& stack() const { return _stack; }
 
   bool is_empty() const { return _cur_seg == NULL; }
 
@@ -198,7 +199,7 @@
   void sync(); // Sync the iterator's state to the stack's current state.
 
 private:
-  Stack<E>& _stack;
+  Stack<E, F>& _stack;
   size_t    _cur_seg_size;
   E*        _cur_seg;
   size_t    _full_seg_size;
--- a/hotspot/src/share/vm/utilities/stack.inline.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/stack.inline.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -27,7 +27,7 @@
 
 #include "utilities/stack.hpp"
 
-StackBase::StackBase(size_t segment_size, size_t max_cache_size,
+template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_cache_size,
                      size_t max_size):
   _seg_size(segment_size),
   _max_cache_size(max_cache_size),
@@ -36,7 +36,7 @@
   assert(_max_size % _seg_size == 0, "not a multiple");
 }
 
-size_t StackBase::adjust_max_size(size_t max_size, size_t seg_size)
+template <MEMFLAGS F> size_t StackBase<F>::adjust_max_size(size_t max_size, size_t seg_size)
 {
   assert(seg_size > 0, "cannot be 0");
   assert(max_size >= seg_size || max_size == 0, "max_size too small");
@@ -47,54 +47,54 @@
   return (max_size + seg_size - 1) / seg_size * seg_size;
 }
 
-template <class E>
-Stack<E>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
-  StackBase(adjust_segment_size(segment_size), max_cache_size, max_size)
+template <class E, MEMFLAGS F>
+Stack<E, F>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
+  StackBase<F>(adjust_segment_size(segment_size), max_cache_size, max_size)
 {
   reset(true);
 }
 
-template <class E>
-void Stack<E>::push(E item)
+template <class E, MEMFLAGS F>
+void Stack<E, F>::push(E item)
 {
   assert(!is_full(), "pushing onto a full stack");
-  if (_cur_seg_size == _seg_size) {
+  if (this->_cur_seg_size == this->_seg_size) {
     push_segment();
   }
-  _cur_seg[_cur_seg_size] = item;
-  ++_cur_seg_size;
+  this->_cur_seg[this->_cur_seg_size] = item;
+  ++this->_cur_seg_size;
 }
 
-template <class E>
-E Stack<E>::pop()
+template <class E, MEMFLAGS F>
+E Stack<E, F>::pop()
 {
   assert(!is_empty(), "popping from an empty stack");
-  if (_cur_seg_size == 1) {
-    E tmp = _cur_seg[--_cur_seg_size];
+  if (this->_cur_seg_size == 1) {
+    E tmp = _cur_seg[--this->_cur_seg_size];
     pop_segment();
     return tmp;
   }
-  return _cur_seg[--_cur_seg_size];
+  return this->_cur_seg[--this->_cur_seg_size];
 }
 
-template <class E>
-void Stack<E>::clear(bool clear_cache)
+template <class E, MEMFLAGS F>
+void Stack<E, F>::clear(bool clear_cache)
 {
   free_segments(_cur_seg);
   if (clear_cache) free_segments(_cache);
   reset(clear_cache);
 }
 
-template <class E>
-size_t Stack<E>::default_segment_size()
+template <class E, MEMFLAGS F>
+size_t Stack<E, F>::default_segment_size()
 {
   // Number of elements that fit in 4K bytes minus the size of two pointers
   // (link field and malloc header).
   return (4096 - 2 * sizeof(E*)) / sizeof(E);
 }
 
-template <class E>
-size_t Stack<E>::adjust_segment_size(size_t seg_size)
+template <class E, MEMFLAGS F>
+size_t Stack<E, F>::adjust_segment_size(size_t seg_size)
 {
   const size_t elem_sz = sizeof(E);
   const size_t ptr_sz = sizeof(E*);
@@ -105,93 +105,93 @@
   return seg_size;
 }
 
-template <class E>
-size_t Stack<E>::link_offset() const
+template <class E, MEMFLAGS F>
+size_t Stack<E, F>::link_offset() const
 {
-  return align_size_up(_seg_size * sizeof(E), sizeof(E*));
+  return align_size_up(this->_seg_size * sizeof(E), sizeof(E*));
 }
 
-template <class E>
-size_t Stack<E>::segment_bytes() const
+template <class E, MEMFLAGS F>
+size_t Stack<E, F>::segment_bytes() const
 {
   return link_offset() + sizeof(E*);
 }
 
-template <class E>
-E** Stack<E>::link_addr(E* seg) const
+template <class E, MEMFLAGS F>
+E** Stack<E, F>::link_addr(E* seg) const
 {
   return (E**) ((char*)seg + link_offset());
 }
 
-template <class E>
-E* Stack<E>::get_link(E* seg) const
+template <class E, MEMFLAGS F>
+E* Stack<E, F>::get_link(E* seg) const
 {
   return *link_addr(seg);
 }
 
-template <class E>
-E* Stack<E>::set_link(E* new_seg, E* old_seg)
+template <class E, MEMFLAGS F>
+E* Stack<E, F>::set_link(E* new_seg, E* old_seg)
 {
   *link_addr(new_seg) = old_seg;
   return new_seg;
 }
 
-template <class E>
-E* Stack<E>::alloc(size_t bytes)
+template <class E, MEMFLAGS F>
+E* Stack<E, F>::alloc(size_t bytes)
 {
-  return (E*) NEW_C_HEAP_ARRAY(char, bytes);
+  return (E*) NEW_C_HEAP_ARRAY(char, bytes, F);
 }
 
-template <class E>
-void Stack<E>::free(E* addr, size_t bytes)
+template <class E, MEMFLAGS F>
+void Stack<E, F>::free(E* addr, size_t bytes)
 {
-  FREE_C_HEAP_ARRAY(char, (char*) addr);
+  FREE_C_HEAP_ARRAY(char, (char*) addr, F);
 }
 
-template <class E>
-void Stack<E>::push_segment()
+template <class E, MEMFLAGS F>
+void Stack<E, F>::push_segment()
 {
-  assert(_cur_seg_size == _seg_size, "current segment is not full");
+  assert(this->_cur_seg_size == this->_seg_size, "current segment is not full");
   E* next;
-  if (_cache_size > 0) {
+  if (this->_cache_size > 0) {
     // Use a cached segment.
     next = _cache;
     _cache = get_link(_cache);
-    --_cache_size;
+    --this->_cache_size;
   } else {
     next = alloc(segment_bytes());
     DEBUG_ONLY(zap_segment(next, true);)
   }
   const bool at_empty_transition = is_empty();
-  _cur_seg = set_link(next, _cur_seg);
-  _cur_seg_size = 0;
-  _full_seg_size += at_empty_transition ? 0 : _seg_size;
+  this->_cur_seg = set_link(next, _cur_seg);
+  this->_cur_seg_size = 0;
+  this->_full_seg_size += at_empty_transition ? 0 : this->_seg_size;
   DEBUG_ONLY(verify(at_empty_transition);)
 }
 
-template <class E>
-void Stack<E>::pop_segment()
+template <class E, MEMFLAGS F>
+void Stack<E, F>::pop_segment()
 {
-  assert(_cur_seg_size == 0, "current segment is not empty");
+  assert(this->_cur_seg_size == 0, "current segment is not empty");
   E* const prev = get_link(_cur_seg);
-  if (_cache_size < _max_cache_size) {
+  if (this->_cache_size < this->_max_cache_size) {
     // Add the current segment to the cache.
     DEBUG_ONLY(zap_segment(_cur_seg, false);)
     _cache = set_link(_cur_seg, _cache);
-    ++_cache_size;
+    ++this->_cache_size;
   } else {
     DEBUG_ONLY(zap_segment(_cur_seg, true);)
     free(_cur_seg, segment_bytes());
   }
   const bool at_empty_transition = prev == NULL;
-  _cur_seg = prev;
-  _cur_seg_size = _seg_size;
-  _full_seg_size -= at_empty_transition ? 0 : _seg_size;
+  this->_cur_seg = prev;
+  this->_cur_seg_size = this->_seg_size;
+  this->_full_seg_size -= at_empty_transition ? 0 : this->_seg_size;
   DEBUG_ONLY(verify(at_empty_transition);)
 }
 
-template <class E>
-void Stack<E>::free_segments(E* seg)
+template <class E, MEMFLAGS F>
+void Stack<E, F>::free_segments(E* seg)
 {
   const size_t bytes = segment_bytes();
   while (seg != NULL) {
@@ -201,37 +201,37 @@
   }
 }
 
-template <class E>
-void Stack<E>::reset(bool reset_cache)
+template <class E, MEMFLAGS F>
+void Stack<E, F>::reset(bool reset_cache)
 {
-  _cur_seg_size = _seg_size; // So push() will alloc a new segment.
-  _full_seg_size = 0;
+  this->_cur_seg_size = this->_seg_size; // So push() will alloc a new segment.
+  this->_full_seg_size = 0;
   _cur_seg = NULL;
   if (reset_cache) {
-    _cache_size = 0;
+    this->_cache_size = 0;
     _cache = NULL;
   }
 }
 
 #ifdef ASSERT
-template <class E>
-void Stack<E>::verify(bool at_empty_transition) const
+template <class E, MEMFLAGS F>
+void Stack<E, F>::verify(bool at_empty_transition) const
 {
-  assert(size() <= max_size(), "stack exceeded bounds");
-  assert(cache_size() <= max_cache_size(), "cache exceeded bounds");
-  assert(_cur_seg_size <= segment_size(), "segment index exceeded bounds");
+  assert(size() <= this->max_size(), "stack exceeded bounds");
+  assert(this->cache_size() <= this->max_cache_size(), "cache exceeded bounds");
+  assert(this->_cur_seg_size <= this->segment_size(), "segment index exceeded bounds");
 
-  assert(_full_seg_size % _seg_size == 0, "not a multiple");
+  assert(this->_full_seg_size % this->_seg_size == 0, "not a multiple");
   assert(at_empty_transition || is_empty() == (size() == 0), "mismatch");
-  assert((_cache == NULL) == (cache_size() == 0), "mismatch");
+  assert((_cache == NULL) == (this->cache_size() == 0), "mismatch");
 
   if (is_empty()) {
-    assert(_cur_seg_size == segment_size(), "sanity");
+    assert(this->_cur_seg_size == this->segment_size(), "sanity");
   }
 }
 
-template <class E>
-void Stack<E>::zap_segment(E* seg, bool zap_link_field) const
+template <class E, MEMFLAGS F>
+void Stack<E, F>::zap_segment(E* seg, bool zap_link_field) const
 {
   if (!ZapStackSegments) return;
   const size_t zap_bytes = segment_bytes() - (zap_link_field ? 0 : sizeof(E*));
@@ -243,28 +243,28 @@
 }
 #endif
 
-template <class E>
-E* ResourceStack<E>::alloc(size_t bytes)
+template <class E, MEMFLAGS F>
+E* ResourceStack<E, F>::alloc(size_t bytes)
 {
   return (E*) resource_allocate_bytes(bytes);
 }
 
-template <class E>
-void ResourceStack<E>::free(E* addr, size_t bytes)
+template <class E, MEMFLAGS F>
+void ResourceStack<E, F>::free(E* addr, size_t bytes)
 {
   resource_free_bytes((char*) addr, bytes);
 }
 
-template <class E>
-void StackIterator<E>::sync()
+template <class E, MEMFLAGS F>
+void StackIterator<E, F>::sync()
 {
   _full_seg_size = _stack._full_seg_size;
   _cur_seg_size = _stack._cur_seg_size;
   _cur_seg = _stack._cur_seg;
 }
 
-template <class E>
-E* StackIterator<E>::next_addr()
+template <class E, MEMFLAGS F>
+E* StackIterator<E, F>::next_addr()
 {
   assert(!is_empty(), "no items left");
   if (_cur_seg_size == 1) {
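Most of the churn above is the `this->` qualification: once StackBase became a template, its members turned into dependent names, and unqualified references to them no longer resolve during the first phase of template name lookup. A minimal standalone example of the rule, mirroring only the shape of Stack/StackBase:

#include <cstddef>

enum MEMFLAGS { mtNone, mtInternal, mtGC };

template <MEMFLAGS F>
class StackBaseSketch {
 protected:
  std::size_t _seg_size;
  explicit StackBaseSketch(std::size_t seg_size) : _seg_size(seg_size) {}
};

template <class E, MEMFLAGS F>
class StackSketch : public StackBaseSketch<F> {
 public:
  explicit StackSketch(std::size_t seg_size) : StackBaseSketch<F>(seg_size) {}
  std::size_t segment_size() const {
    // _seg_size now lives in a dependent base class, so it has to be written
    // as this->_seg_size (or StackBaseSketch<F>::_seg_size); a bare _seg_size
    // is not found during template name lookup and fails to compile on
    // conforming compilers.  That is what the this-> edits above are for.
    return this->_seg_size;
  }
};

int main() {
  StackSketch<int, mtGC> s(128);
  return s.segment_size() == 128 ? 0 : 1;
}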
--- a/hotspot/src/share/vm/utilities/taskqueue.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/taskqueue.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -132,8 +132,8 @@
 }
 #endif // TASKQUEUE_STATS
 
-template <unsigned int N>
-class TaskQueueSuper: public CHeapObj {
+template <unsigned int N, MEMFLAGS F>
+class TaskQueueSuper: public CHeapObj<F> {
 protected:
   // Internal type for indexing the queue; also used for the tag.
   typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;
@@ -249,22 +249,27 @@
   TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
 };
 
-template<class E, unsigned int N = TASKQUEUE_SIZE>
-class GenericTaskQueue: public TaskQueueSuper<N> {
-protected:
-  typedef typename TaskQueueSuper<N>::Age Age;
-  typedef typename TaskQueueSuper<N>::idx_t idx_t;
+
 
-  using TaskQueueSuper<N>::_bottom;
-  using TaskQueueSuper<N>::_age;
-  using TaskQueueSuper<N>::increment_index;
-  using TaskQueueSuper<N>::decrement_index;
-  using TaskQueueSuper<N>::dirty_size;
+template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
+class GenericTaskQueue: public TaskQueueSuper<N, F> {
+protected:
+  typedef typename TaskQueueSuper<N, F>::Age Age;
+  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;
+
+  using TaskQueueSuper<N, F>::_bottom;
+  using TaskQueueSuper<N, F>::_age;
+  using TaskQueueSuper<N, F>::increment_index;
+  using TaskQueueSuper<N, F>::decrement_index;
+  using TaskQueueSuper<N, F>::dirty_size;
 
 public:
-  using TaskQueueSuper<N>::max_elems;
-  using TaskQueueSuper<N>::size;
-  TASKQUEUE_STATS_ONLY(using TaskQueueSuper<N>::stats;)
+  using TaskQueueSuper<N, F>::max_elems;
+  using TaskQueueSuper<N, F>::size;
+
+#if  TASKQUEUE_STATS
+  using TaskQueueSuper<N, F>::stats;
+#endif
 
 private:
   // Slow paths for push, pop_local.  (pop_global has no fast path.)
@@ -302,18 +307,18 @@
   volatile E* _elems;
 };
 
-template<class E, unsigned int N>
-GenericTaskQueue<E, N>::GenericTaskQueue() {
+template<class E, MEMFLAGS F, unsigned int N>
+GenericTaskQueue<E, F, N>::GenericTaskQueue() {
   assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
 }
 
-template<class E, unsigned int N>
-void GenericTaskQueue<E, N>::initialize() {
-  _elems = NEW_C_HEAP_ARRAY(E, N);
+template<class E, MEMFLAGS F, unsigned int N>
+void GenericTaskQueue<E, F, N>::initialize() {
+  _elems = NEW_C_HEAP_ARRAY(E, N, F);
 }
 
-template<class E, unsigned int N>
-void GenericTaskQueue<E, N>::oops_do(OopClosure* f) {
+template<class E, MEMFLAGS F, unsigned int N>
+void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
   // tty->print_cr("START OopTaskQueue::oops_do");
   uint iters = size();
   uint index = _bottom;
@@ -329,8 +334,8 @@
   // tty->print_cr("END OopTaskQueue::oops_do");
 }
 
-template<class E, unsigned int N>
-bool GenericTaskQueue<E, N>::push_slow(E t, uint dirty_n_elems) {
+template<class E, MEMFLAGS F, unsigned int N>
+bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
   if (dirty_n_elems == N - 1) {
     // Actually means 0, so do the push.
     uint localBot = _bottom;
@@ -349,8 +354,8 @@
 // whenever the queue goes empty which it will do here if this thread
 // gets the last task or in pop_global() if the queue wraps (top == 0
 // and pop_global() succeeds, see pop_global()).
-template<class E, unsigned int N>
-bool GenericTaskQueue<E, N>::pop_local_slow(uint localBot, Age oldAge) {
+template<class E, MEMFLAGS F, unsigned int N>
+bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
   // This queue was observed to contain exactly one element; either this
   // thread will claim it, or a competing "pop_global".  In either case,
   // the queue will be logically empty afterwards.  Create a new Age value
@@ -382,8 +387,8 @@
   return false;
 }
 
-template<class E, unsigned int N>
-bool GenericTaskQueue<E, N>::pop_global(E& t) {
+template<class E, MEMFLAGS F, unsigned int N>
+bool GenericTaskQueue<E, F, N>::pop_global(E& t) {
   Age oldAge = _age.get();
   uint localBot = _bottom;
   uint n_elems = size(localBot, oldAge.top());
@@ -402,9 +407,9 @@
   return resAge == oldAge;
 }
 
-template<class E, unsigned int N>
-GenericTaskQueue<E, N>::~GenericTaskQueue() {
-  FREE_C_HEAP_ARRAY(E, _elems);
+template<class E, MEMFLAGS F, unsigned int N>
+GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
+  FREE_C_HEAP_ARRAY(E, _elems, F);
 }
 
 // OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
@@ -418,12 +423,12 @@
 // Note that size() is not hidden--it returns the number of elements in the
 // TaskQueue, and does not include the size of the overflow stack.  This
 // simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
-template<class E, unsigned int N = TASKQUEUE_SIZE>
-class OverflowTaskQueue: public GenericTaskQueue<E, N>
+template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
+class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
 {
 public:
-  typedef Stack<E>               overflow_t;
-  typedef GenericTaskQueue<E, N> taskqueue_t;
+  typedef Stack<E, F>               overflow_t;
+  typedef GenericTaskQueue<E, F, N> taskqueue_t;
 
   TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)
 
@@ -445,8 +450,8 @@
   overflow_t _overflow_stack;
 };
 
-template <class E, unsigned int N>
-bool OverflowTaskQueue<E, N>::push(E t)
+template <class E, MEMFLAGS F, unsigned int N>
+bool OverflowTaskQueue<E, F, N>::push(E t)
 {
   if (!taskqueue_t::push(t)) {
     overflow_stack()->push(t);
@@ -455,15 +460,15 @@
   return true;
 }
 
-template <class E, unsigned int N>
-bool OverflowTaskQueue<E, N>::pop_overflow(E& t)
+template <class E, MEMFLAGS F, unsigned int N>
+bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
 {
   if (overflow_empty()) return false;
   t = overflow_stack()->pop();
   return true;
 }
 
-class TaskQueueSetSuper: public CHeapObj {
+class TaskQueueSetSuper {
 protected:
   static int randomParkAndMiller(int* seed0);
 public:
@@ -471,8 +476,11 @@
   virtual bool peek() = 0;
 };
 
-template<class T>
-class GenericTaskQueueSet: public TaskQueueSetSuper {
+template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
+};
+
+template<class T, MEMFLAGS F>
+class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
 private:
   uint _n;
   T** _queues;
@@ -482,7 +490,7 @@
 
   GenericTaskQueueSet(int n) : _n(n) {
     typedef T* GenericTaskQueuePtr;
-    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n);
+    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
     for (int i = 0; i < n; i++) {
       _queues[i] = NULL;
     }
@@ -506,19 +514,19 @@
   bool peek();
 };
 
-template<class T> void
-GenericTaskQueueSet<T>::register_queue(uint i, T* q) {
+template<class T, MEMFLAGS F> void
+GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
   assert(i < _n, "index out of range.");
   _queues[i] = q;
 }
 
-template<class T> T*
-GenericTaskQueueSet<T>::queue(uint i) {
+template<class T, MEMFLAGS F> T*
+GenericTaskQueueSet<T, F>::queue(uint i) {
   return _queues[i];
 }
 
-template<class T> bool
-GenericTaskQueueSet<T>::steal(uint queue_num, int* seed, E& t) {
+template<class T, MEMFLAGS F> bool
+GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
   for (uint i = 0; i < 2 * _n; i++) {
     if (steal_best_of_2(queue_num, seed, t)) {
       TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
@@ -529,8 +537,8 @@
   return false;
 }
 
-template<class T> bool
-GenericTaskQueueSet<T>::steal_best_of_all(uint queue_num, int* seed, E& t) {
+template<class T, MEMFLAGS F> bool
+GenericTaskQueueSet<T, F>::steal_best_of_all(uint queue_num, int* seed, E& t) {
   if (_n > 2) {
     int best_k;
     uint best_sz = 0;
@@ -553,11 +561,11 @@
   }
 }
 
-template<class T> bool
-GenericTaskQueueSet<T>::steal_1_random(uint queue_num, int* seed, E& t) {
+template<class T, MEMFLAGS F> bool
+GenericTaskQueueSet<T, F>::steal_1_random(uint queue_num, int* seed, E& t) {
   if (_n > 2) {
     uint k = queue_num;
-    while (k == queue_num) k = randomParkAndMiller(seed) % _n;
+    while (k == queue_num) k = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
     return _queues[2]->pop_global(t);
   } else if (_n == 2) {
     // Just try the other one.
@@ -569,13 +577,13 @@
   }
 }
 
-template<class T> bool
-GenericTaskQueueSet<T>::steal_best_of_2(uint queue_num, int* seed, E& t) {
+template<class T, MEMFLAGS F> bool
+GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
   if (_n > 2) {
     uint k1 = queue_num;
-    while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
+    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
     uint k2 = queue_num;
-    while (k2 == queue_num || k2 == k1) k2 = randomParkAndMiller(seed) % _n;
+    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
     // Sample both and try the larger.
     uint sz1 = _queues[k1]->size();
     uint sz2 = _queues[k2]->size();
@@ -591,8 +599,8 @@
   }
 }
 
-template<class T>
-bool GenericTaskQueueSet<T>::peek() {
+template<class T, MEMFLAGS F>
+bool GenericTaskQueueSet<T, F>::peek() {
   // Try all the queues.
   for (uint j = 0; j < _n; j++) {
     if (_queues[j]->peek())
@@ -602,7 +610,7 @@
 }
 
 // When to terminate from the termination protocol.
-class TerminatorTerminator: public CHeapObj {
+class TerminatorTerminator: public CHeapObj<mtInternal> {
 public:
   virtual bool should_exit_termination() = 0;
 };
@@ -665,8 +673,8 @@
 #endif
 };
 
-template<class E, unsigned int N> inline bool
-GenericTaskQueue<E, N>::push(E t) {
+template<class E, MEMFLAGS F, unsigned int N> inline bool
+GenericTaskQueue<E, F, N>::push(E t) {
   uint localBot = _bottom;
   assert((localBot >= 0) && (localBot < N), "_bottom out of range.");
   idx_t top = _age.top();
@@ -683,8 +691,8 @@
   }
 }
 
-template<class E, unsigned int N> inline bool
-GenericTaskQueue<E, N>::pop_local(E& t) {
+template<class E, MEMFLAGS F, unsigned int N> inline bool
+GenericTaskQueue<E, F, N>::pop_local(E& t) {
   uint localBot = _bottom;
   // This value cannot be N-1.  That can only occur as a result of
   // the assignment to bottom in this method.  If it does, this method
@@ -715,8 +723,8 @@
   }
 }
 
-typedef GenericTaskQueue<oop>             OopTaskQueue;
-typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
+typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
+typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;
 
 #ifdef _MSC_VER
 #pragma warning(push)
@@ -796,11 +804,11 @@
 #pragma warning(pop)
 #endif
 
-typedef OverflowTaskQueue<StarTask>           OopStarTaskQueue;
-typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;
+typedef OverflowTaskQueue<StarTask, mtClass>           OopStarTaskQueue;
+typedef GenericTaskQueueSet<OopStarTaskQueue, mtClass> OopStarTaskQueueSet;
 
-typedef OverflowTaskQueue<size_t>             RegionTaskQueue;
-typedef GenericTaskQueueSet<RegionTaskQueue>  RegionTaskQueueSet;
+typedef OverflowTaskQueue<size_t, mtInternal>             RegionTaskQueue;
+typedef GenericTaskQueueSet<RegionTaskQueue, mtClass>     RegionTaskQueueSet;
 
 
 #endif // SHARE_VM_UTILITIES_TASKQUEUE_HPP
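With the extra MEMFLAGS parameter, each task-queue typedef picks its own category (mtGC for the oop queues above), and the category flows into the NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY calls inside the template. A reduced standalone sketch of that propagation (QueueSketch and tagged_alloc are illustrative only):

#include <cstddef>
#include <cstdlib>

enum MEMFLAGS { mtNone, mtInternal, mtGC, mtClass };

static void* tagged_alloc(std::size_t bytes, MEMFLAGS f) { (void) f; return std::malloc(bytes); }
static void  tagged_free(void* p, MEMFLAGS f)            { (void) f; std::free(p); }

// Same shape as GenericTaskQueue<E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>:
// the category is a template parameter, so every allocation the queue makes is
// attributed without callers passing a flag around at run time.
template <class E, MEMFLAGS F, unsigned int N = 1024>
class QueueSketch {
  E* _elems;
 public:
  QueueSketch() : _elems(0) {}
  void initialize() { _elems = (E*) tagged_alloc(N * sizeof(E), F); }
  ~QueueSketch()    { tagged_free(_elems, F); }
};

// As with OopTaskQueue and RegionTaskQueue above, the category is chosen per typedef.
typedef QueueSketch<int, mtGC>       GCQueueSketch;
typedef QueueSketch<int, mtInternal> InternalQueueSketch;

int main() {
  GCQueueSketch q;
  q.initialize();
  return 0;
}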
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -33,6 +33,7 @@
 #include "runtime/thread.hpp"
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/decoder.hpp"
 #include "utilities/defaultStream.hpp"
@@ -818,6 +819,9 @@
   static bool transmit_report_done = false; // done error reporting
   static fdStream log;                  // error log
 
+  // disable NMT to avoid further exceptions
+  MemTracker::shutdown(MemTracker::NMT_error_reporting);
+
   if (SuppressFatalErrorMessage) {
       os::abort();
   }
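Shutting NMT down at the top of the error reporter is defensive: once the VM is already failing, running tracking bookkeeping on every further allocation risks a secondary fault while the hs_err report is being written. A generic standalone sketch of the pattern (the tracker below is invented for illustration; only the MemTracker::shutdown(NMT_error_reporting) call above is real):

#include <cstdio>

// Invented stand-in for a tracker with a global on/off switch.
namespace TrackerSketch {
  static bool _enabled = true;
  void shutdown() { _enabled = false; }
  bool enabled()  { return _enabled; }
}

void report_fatal_error(const char* msg) {
  // First step: stop tracking so the reporting path itself cannot trip over
  // tracker state that may already be corrupted.
  TrackerSketch::shutdown();
  std::fprintf(stderr, "fatal error: %s\n", msg);
}

int main() {
  report_fatal_error("example");
  return 0;
}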
--- a/hotspot/src/share/vm/utilities/workgroup.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/workgroup.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -77,7 +77,7 @@
                   name(),
                   total_workers());
   }
-  _gang_workers = NEW_C_HEAP_ARRAY(GangWorker*, total_workers());
+  _gang_workers = NEW_C_HEAP_ARRAY(GangWorker*, total_workers(), mtInternal);
   if (gang_workers() == NULL) {
     vm_exit_out_of_memory(0, "Cannot create GangWorker array.");
     return false;
@@ -241,6 +241,7 @@
 
 void GangWorker::initialize() {
   this->initialize_thread_local_storage();
+  this->record_stack_base_and_size();
   assert(_gang != NULL, "No gang to run in");
   os::set_priority(this, NearMaxPriority);
   if (TraceWorkGang) {
@@ -421,7 +422,7 @@
 
 SubTasksDone::SubTasksDone(uint n) :
   _n_tasks(n), _n_threads(1), _tasks(NULL) {
-  _tasks = NEW_C_HEAP_ARRAY(uint, n);
+  _tasks = NEW_C_HEAP_ARRAY(uint, n, mtInternal);
   guarantee(_tasks != NULL, "alloc failure");
   clear();
 }
@@ -476,7 +477,7 @@
 
 
 SubTasksDone::~SubTasksDone() {
-  if (_tasks != NULL) FREE_C_HEAP_ARRAY(jint, _tasks);
+  if (_tasks != NULL) FREE_C_HEAP_ARRAY(jint, _tasks, mtInternal);
 }
 
 // *** SequentialSubTasksDone
--- a/hotspot/src/share/vm/utilities/workgroup.hpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/workgroup.hpp	Thu Jun 28 17:03:16 2012 -0400
@@ -123,7 +123,7 @@
 // Class AbstractWorkGang:
 // An abstract class representing a gang of workers.
 // You subclass this to supply an implementation of run_task().
-class AbstractWorkGang: public CHeapObj {
+class AbstractWorkGang: public CHeapObj<mtInternal> {
   // Here's the public interface to this class.
 public:
   // Constructor and destructor.
@@ -402,7 +402,7 @@
 // subtasks will be identified by integer indices, usually elements of an
 // enumeration type.
 
-class SubTasksDone : public CHeapObj {
+class SubTasksDone: public CHeapObj<mtInternal> {
   uint* _tasks;
   uint _n_tasks;
   // _n_threads is used to determine when a sub task is done.
--- a/hotspot/src/share/vm/utilities/xmlstream.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/hotspot/src/share/vm/utilities/xmlstream.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -43,7 +43,7 @@
 #ifdef ASSERT
   _element_depth = 0;
   int   init_len = 100;
-  char* init_buf = NEW_C_HEAP_ARRAY(char, init_len);
+  char* init_buf = NEW_C_HEAP_ARRAY(char, init_len, mtInternal);
   _element_close_stack_low  = init_buf;
   _element_close_stack_high = init_buf + init_len;
   _element_close_stack_ptr  = init_buf + init_len - 1;
@@ -58,7 +58,7 @@
 
 #ifdef ASSERT
 xmlStream::~xmlStream() {
-  FREE_C_HEAP_ARRAY(char, _element_close_stack_low);
+  FREE_C_HEAP_ARRAY(char, _element_close_stack_low, mtInternal);
 }
 #endif
 
@@ -155,14 +155,14 @@
     int old_len = _element_close_stack_high - old_ptr;
     int new_len = old_len * 2;
     if (new_len < 100)  new_len = 100;
-    char* new_low  = NEW_C_HEAP_ARRAY(char, new_len);
+    char* new_low  = NEW_C_HEAP_ARRAY(char, new_len, mtInternal);
     char* new_high = new_low + new_len;
     char* new_ptr  = new_high - old_len;
     memcpy(new_ptr, old_ptr, old_len);
     _element_close_stack_high = new_high;
     _element_close_stack_low  = new_low;
     _element_close_stack_ptr  = new_ptr;
-    FREE_C_HEAP_ARRAY(char, old_low);
+    FREE_C_HEAP_ARRAY(char, old_low, mtInternal);
     push_ptr = new_ptr - (tag_len+1);
   }
   assert(push_ptr >= _element_close_stack_low, "in range");