Merge
author lana
Thu, 28 Aug 2014 14:53:43 -0700
changeset 25963 fabe6090d37a
parent 25886 fe4bbd4ffc73 (current diff)
parent 25962 8f95d0407e21 (diff)
child 25964 a6a096238d10
Merge
hotspot/src/share/vm/services/memPtr.cpp
hotspot/src/share/vm/services/memPtr.hpp
hotspot/src/share/vm/services/memPtrArray.hpp
hotspot/src/share/vm/services/memRecorder.cpp
hotspot/src/share/vm/services/memRecorder.hpp
hotspot/src/share/vm/services/memSnapshot.cpp
hotspot/src/share/vm/services/memSnapshot.hpp
hotspot/src/share/vm/services/memTrackWorker.cpp
hotspot/src/share/vm/services/memTrackWorker.hpp
--- a/hotspot/agent/src/os/solaris/proc/saproc.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/agent/src/os/solaris/proc/saproc.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -314,7 +314,7 @@
     handle = dlopen(name, mode);
   }
   if (_libsaproc_debug) {
-    printf("libsaproc DEBUG: pathmap_dlopen %s return 0x%x\n", name, handle);
+    printf("libsaproc DEBUG: pathmap_dlopen %s return 0x%lx\n", name, (unsigned long) handle);
   }
   return handle;
 }
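
The %x -> %lx fix above matters on LP64 Solaris: handle is a void*, which is
8 bytes, while %x consumes only an unsigned int, so the old format string was
undefined behavior in varargs and truncated the pointer. A minimal sketch of
the strictly conforming alternative (%p, which is what the C standard
guarantees for printing pointers; the helper name is illustrative only):

    #include <cstdio>

    // Illustrative only: %p is the portable way to print a pointer.
    static void debug_log_handle(const char* name, void* handle) {
      printf("libsaproc DEBUG: pathmap_dlopen %s return %p\n", name, handle);
    }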
@@ -661,30 +661,30 @@
   // read FileMapHeader
   size_t n = read(fd, pheader, sizeof(struct FileMapHeader));
   if (n != sizeof(struct FileMapHeader)) {
-    free(pheader);
-    close(fd);
     char errMsg[ERR_MSG_SIZE];
     sprintf(errMsg, "unable to read shared archive file map header from %s", classes_jsa);
+    close(fd);
+    free(pheader);
     THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);
   }
 
   // check file magic
   if (pheader->_magic != 0xf00baba2) {
-    free(pheader);
-    close(fd);
     char errMsg[ERR_MSG_SIZE];
     sprintf(errMsg, "%s has bad shared archive magic 0x%x, expecting 0xf00baba2",
                    classes_jsa, pheader->_magic);
+    close(fd);
+    free(pheader);
     THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);
   }
 
   // check version
   if (pheader->_version != CURRENT_ARCHIVE_VERSION) {
-    free(pheader);
-    close(fd);
     char errMsg[ERR_MSG_SIZE];
     sprintf(errMsg, "%s has wrong shared archive version %d, expecting %d",
                    classes_jsa, pheader->_version, CURRENT_ARCHIVE_VERSION);
+    close(fd);
+    free(pheader);
     THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);
   }
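
The reordering in the three error paths above is a use-after-free fix, not a
cosmetic one: in the magic and version paths the message is formatted from
pheader->_magic / pheader->_version, so freeing first and formatting second
read freed memory (the read-failure path is reordered to match). Sketch of
the old, broken order:

    // Old order (sketch): reads pheader after it has been freed.
    free(pheader);
    close(fd);
    char errMsg[ERR_MSG_SIZE];
    sprintf(errMsg, "%s has bad shared archive magic 0x%x, expecting 0xf00baba2",
            classes_jsa, pheader->_magic);   // use-after-free
    THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);

The patch builds errMsg first, then releases fd and pheader, then throws.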
 
--- a/hotspot/make/excludeSrc.make	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/make/excludeSrc.make	Thu Aug 28 14:53:43 2014 -0700
@@ -119,8 +119,8 @@
       CFLAGS += -DINCLUDE_NMT=0
 
       Src_Files_EXCLUDE += \
-	 memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
-	 memTracker.cpp nmtDCmd.cpp
+	 memBaseline.cpp memReporter.cpp mallocTracker.cpp virtualMemoryTracker.cpp nmtCommon.cpp \
+	 memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
 endif
 
 -include $(HS_ALT_MAKE)/excludeSrc.make
--- a/hotspot/make/jprt.properties	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/make/jprt.properties	Thu Aug 28 14:53:43 2014 -0700
@@ -356,14 +356,15 @@
   ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
 
 jprt.make.rule.test.targets.standard.reg.group = \
-  ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.linux.i586}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.linux.x64}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.windows.i586}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.windows.x64}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.linux.i586}-{product|fastdebug}-c1-GROUP, \
-  ${jprt.my.windows.i586}-{product|fastdebug}-c1-GROUP
+  ${jprt.my.solaris.sparcv9}-fastdebug-c2-GROUP, \
+  ${jprt.my.solaris.x64}-fastdebug-c2-GROUP, \
+  ${jprt.my.linux.i586}-fastdebug-c2-GROUP, \
+  ${jprt.my.linux.x64}-fastdebug-c2-GROUP, \
+  ${jprt.my.macosx.x64}-fastdebug-c2-GROUP, \
+  ${jprt.my.windows.i586}-fastdebug-c2-GROUP, \
+  ${jprt.my.windows.x64}-fastdebug-c2-GROUP, \
+  ${jprt.my.linux.i586}-fastdebug-c1-GROUP, \
+  ${jprt.my.windows.i586}-fastdebug-c1-GROUP
 
 jprt.make.rule.test.targets.standard = \
   ${jprt.make.rule.test.targets.standard.client}, \
--- a/hotspot/make/solaris/makefiles/dtrace.make	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/make/solaris/makefiles/dtrace.make	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,8 @@
 DTRACE_COMMON_SRCDIR = $(GAMMADIR)/src/os/posix/dtrace
 DTRACE = dtrace
 DTRACE.o = $(DTRACE).o
+DTRACE_JHELPER = dtrace_jhelper
+DTRACE_JHELPER.o = $(DTRACE_JHELPER).o
 
 # to remove '-g' option which causes link problems
 # also '-z nodefs' is used as workaround
@@ -255,7 +257,10 @@
 endif
 
 $(DTRACE).d: $(DTRACE_COMMON_SRCDIR)/hotspot.d $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d \
-             $(DTRACE_COMMON_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
+             $(DTRACE_COMMON_SRCDIR)/hs_private.d
+	$(QUIETLY) cat $^ > $@
+
+$(DTRACE_JHELPER).d: $(DTRACE_SRCDIR)/jhelper.d
 	$(QUIETLY) cat $^ > $@
 
 DTraced_Files = ciEnv.o \
@@ -280,7 +285,7 @@
                 vmGCOperations.o \
 
 # Dtrace is available, so we build $(DTRACE.o)  
-$(DTRACE.o): $(DTRACE).d $(JVMOFFS).h $(JVMOFFS)Index.h $(DTraced_Files)
+$(DTRACE.o): $(DTRACE).d $(DTraced_Files)
 	@echo Compiling $(DTRACE).d
 
 	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -xlazyload -o $@ -s $(DTRACE).d \
@@ -344,6 +349,11 @@
 
 dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h
 
+# The jhelper.d probes and the hotspot probes are separated into two different SUNW_dof sections.
+# jhelper.d is therefore built without the -xlazyload flag.
+$(DTRACE_JHELPER.o) : $(DTRACE_JHELPER).d $(JVMOFFS).h $(JVMOFFS)Index.h
+	@echo Compiling $(DTRACE_JHELPER).d
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -o $@ -s $(DTRACE_JHELPER).d
 
 .PHONY: dtraceCheck
 
@@ -372,7 +382,7 @@
 ifneq ("${DTRACE_PROG}", "")
 ifeq ("${HOTSPOT_DISABLE_DTRACE_PROBES}", "")
 
-DTRACE_OBJS = $(DTRACE.o) $(JVMOFFS.o)
+DTRACE_OBJS = $(DTRACE.o) $(JVMOFFS.o) $(DTRACE_JHELPER.o)
 CFLAGS += $(DTRACE_INCL) -DDTRACE_ENABLED
 MAPFILE_DTRACE_OPT = $(MAPFILE_DTRACE)
 
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -298,6 +298,7 @@
     LWZ_OPCODE   = (32u << OPCODE_SHIFT),
     LWZX_OPCODE  = (31u << OPCODE_SHIFT |  23u << 1),
     LWZU_OPCODE  = (33u << OPCODE_SHIFT),
+    LWBRX_OPCODE = (31u << OPCODE_SHIFT |  534 << 1),
 
     LHA_OPCODE   = (42u << OPCODE_SHIFT),
     LHAX_OPCODE  = (31u << OPCODE_SHIFT | 343u << 1),
@@ -306,6 +307,7 @@
     LHZ_OPCODE   = (40u << OPCODE_SHIFT),
     LHZX_OPCODE  = (31u << OPCODE_SHIFT | 279u << 1),
     LHZU_OPCODE  = (41u << OPCODE_SHIFT),
+    LHBRX_OPCODE = (31u << OPCODE_SHIFT |  790 << 1),
 
     LBZ_OPCODE   = (34u << OPCODE_SHIFT),
     LBZX_OPCODE  = (31u << OPCODE_SHIFT |  87u << 1),
@@ -1364,11 +1366,17 @@
   inline void lwax( Register d, Register s1, Register s2);
   inline void lwa(  Register d, int si16,    Register s1);
 
+  // 4 bytes reversed
+  inline void lwbrx( Register d, Register s1, Register s2);
+
   // 2 bytes
   inline void lhzx( Register d, Register s1, Register s2);
   inline void lhz(  Register d, int si16,    Register s1);
   inline void lhzu( Register d, int si16,    Register s1);
 
+  // 2 bytes reversed
+  inline void lhbrx( Register d, Register s1, Register s2);
+
   // 2 bytes
   inline void lhax( Register d, Register s1, Register s2);
   inline void lha(  Register d, int si16,    Register s1);
@@ -1858,10 +1866,12 @@
   inline void lwz(  Register d, int si16);
   inline void lwax( Register d, Register s2);
   inline void lwa(  Register d, int si16);
+  inline void lwbrx(Register d, Register s2);
   inline void lhzx( Register d, Register s2);
   inline void lhz(  Register d, int si16);
   inline void lhax( Register d, Register s2);
   inline void lha(  Register d, int si16);
+  inline void lhbrx(Register d, Register s2);
   inline void lbzx( Register d, Register s2);
   inline void lbz(  Register d, int si16);
   inline void ldx(  Register d, Register s2);
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -263,10 +263,14 @@
 inline void Assembler::lwax( Register d, Register s1, Register s2) { emit_int32(LWAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::lwa(  Register d, int si16,    Register s1) { emit_int32(LWA_OPCODE  | rt(d) | ds(si16)   | ra0mem(s1));}
 
+inline void Assembler::lwbrx( Register d, Register s1, Register s2) { emit_int32(LWBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+
 inline void Assembler::lhzx( Register d, Register s1, Register s2) { emit_int32(LHZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::lhz(  Register d, int si16,    Register s1) { emit_int32(LHZ_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
 inline void Assembler::lhzu( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
 
+inline void Assembler::lhbrx( Register d, Register s1, Register s2) { emit_int32(LHBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+
 inline void Assembler::lhax( Register d, Register s1, Register s2) { emit_int32(LHAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::lha(  Register d, int si16,    Register s1) { emit_int32(LHA_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
 inline void Assembler::lhau( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHAU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
@@ -736,10 +740,12 @@
 inline void Assembler::lwz(  Register d, int si16   ) { emit_int32( LWZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::lwax( Register d, Register s2) { emit_int32( LWAX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lwa(  Register d, int si16   ) { emit_int32( LWA_OPCODE  | rt(d) | ds(si16));}
+inline void Assembler::lwbrx(Register d, Register s2) { emit_int32( LWBRX_OPCODE| rt(d) | rb(s2));}
 inline void Assembler::lhzx( Register d, Register s2) { emit_int32( LHZX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lhz(  Register d, int si16   ) { emit_int32( LHZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::lhax( Register d, Register s2) { emit_int32( LHAX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lha(  Register d, int si16   ) { emit_int32( LHA_OPCODE  | rt(d) | d1(si16));}
+inline void Assembler::lhbrx(Register d, Register s2) { emit_int32( LHBRX_OPCODE| rt(d) | rb(s2));}
 inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lbz(  Register d, int si16   ) { emit_int32( LBZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::ld(   Register d, int si16   ) { emit_int32( LD_OPCODE   | rt(d) | ds(si16));}
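
lwbrx and lhbrx, added above, are PowerPC's byte-reversed indexed loads; they
are what the VM_LITTLE_ENDIAN paths in the interpreter changes below rely on.
On a little-endian CPU they read a big-endian value (Java bytecode order)
directly into native register order. A portable sketch of the semantics
(illustration only, not the implementation):

    #include <cstdint>

    // What lwbrx yields on a little-endian CPU: the big-endian 4-byte
    // value at ea, in native order.
    static uint32_t lwbrx_on_le(const uint8_t* ea) {
      return (uint32_t(ea[0]) << 24) | (uint32_t(ea[1]) << 16) |
             (uint32_t(ea[2]) <<  8) |  uint32_t(ea[3]);
    }

    // lhbrx is the 2-byte analogue; the reversed loads are zero-extending,
    // so extsh/extsw afterwards provide the signed variants.
    static uint16_t lhbrx_on_le(const uint8_t* ea) {
      return uint16_t((ea[0] << 8) | ea[1]);
    }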
--- a/hotspot/src/cpu/ppc/vm/cppInterpreterGenerator_ppc.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/cppInterpreterGenerator_ppc.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,8 +26,9 @@
 #ifndef CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
 #define CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
 
-  address generate_normal_entry(void);
-  address generate_native_entry(void);
+  address generate_normal_entry(bool synchronized);
+  address generate_native_entry(bool synchronized);
+  address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
 
   void lock_method(void);
   void unlock_method(void);
--- a/hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -938,8 +938,9 @@
 // Interpreter stub for calling a native method. (C++ interpreter)
 // This sets up a somewhat different looking stack for calling the native method
 // than the typical interpreter frame setup.
+// The synchronized parameter is ignored.
 //
-address CppInterpreterGenerator::generate_native_entry(void) {
+address CppInterpreterGenerator::generate_native_entry(bool synchronized) {
   if (native_entry != NULL) return native_entry;
   address entry = __ pc();
 
@@ -1729,7 +1730,8 @@
   __ std(R0, BasicObjectLock::obj_offset_in_bytes(), stack_base);  // Mark lock as unused
 }
 
-address CppInterpreterGenerator::generate_normal_entry(void) {
+// The synchronized parameter is ignored.
+address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
   if (interpreter_frame_manager != NULL) return interpreter_frame_manager;
 
   address entry = __ pc();
@@ -2789,38 +2791,6 @@
   return interpreter_frame_manager;
 }
 
-// Generate code for various sorts of method entries
-//
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
-  address entry_point = NULL;
-
-  switch (kind) {
-    case Interpreter::zerolocals                 :                                                                              break;
-    case Interpreter::zerolocals_synchronized    :                                                                              break;
-    case Interpreter::native                     : // Fall thru
-    case Interpreter::native_synchronized        : entry_point = ((CppInterpreterGenerator*)this)->generate_native_entry();     break;
-    case Interpreter::empty                      :                                                                              break;
-    case Interpreter::accessor                   : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();      break;
-    case Interpreter::abstract                   : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();      break;
-    // These are special interpreter intrinsics which we don't support so far.
-    case Interpreter::java_lang_math_sin         :                                                                              break;
-    case Interpreter::java_lang_math_cos         :                                                                              break;
-    case Interpreter::java_lang_math_tan         :                                                                              break;
-    case Interpreter::java_lang_math_abs         :                                                                              break;
-    case Interpreter::java_lang_math_log         :                                                                              break;
-    case Interpreter::java_lang_math_log10       :                                                                              break;
-    case Interpreter::java_lang_math_sqrt        :                                                                              break;
-    case Interpreter::java_lang_math_pow         :                                                                              break;
-    case Interpreter::java_lang_math_exp         :                                                                              break;
-    case Interpreter::java_lang_ref_reference_get: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-    default                                      : ShouldNotReachHere();                                                        break;
-  }
-
-  if (entry_point) {
-    return entry_point;
-  }
-  return ((InterpreterGenerator*)this)->generate_normal_entry();
-}
 
 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : CppInterpreterGenerator(code) {
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -119,9 +119,15 @@
     // Call the Interpreter::remove_activation_preserving_args_entry()
     // func to get the address of the same-named entrypoint in the
     // generated interpreter code.
+#if defined(ABI_ELFv2)
+    call_c(CAST_FROM_FN_PTR(address,
+                            Interpreter::remove_activation_preserving_args_entry),
+           relocInfo::none);
+#else
     call_c(CAST_FROM_FN_PTR(FunctionDescriptor*,
                             Interpreter::remove_activation_preserving_args_entry),
            relocInfo::none);
+#endif
 
     // Jump to Interpreter::_remove_activation_preserving_args_entry.
     mtctr(R3_RET);
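
The ABI_ELFv2 branches being added in this file (and in
templateInterpreter_ppc.cpp below) exist because ELFv2 dropped function
descriptors: a C function pointer is the code entry address itself, so
call_c can take a plain address. Under ELFv1 the pointer instead addresses a
three-word descriptor, roughly as follows (field names illustrative, layout
per the ppc64 ELFv1 convention):

    // ELFv1 ppc64: a "function pointer" points at this, not at code.
    struct FunctionDescriptorSketch {
      void* entry;  // actual code address (what ELFv2 uses directly)
      void* toc;    // callee's TOC base
      void* env;    // environment pointer, unused by C
    };

This is also why the descriptor load of the signature handler later in this
change is compiled out for ELFv2.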
@@ -331,29 +337,40 @@
 void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(int         bcp_offset,
                                                           Register    Rdst,
                                                           signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (bcp_offset) {
+    load_const_optimized(Rdst, bcp_offset);
+    lhbrx(Rdst, R14_bcp, Rdst);
+  } else {
+    lhbrx(Rdst, R14_bcp);
+  }
+  if (is_signed == Signed) {
+    extsh(Rdst, Rdst);
+  }
+#else
   // Read Java big endian format.
   if (is_signed == Signed) {
     lha(Rdst, bcp_offset, R14_bcp);
   } else {
     lhz(Rdst, bcp_offset, R14_bcp);
   }
-#if 0
-  assert(Rtmp != Rdst, "need separate temp register");
-  Register Rfirst = Rtmp;
-  lbz(Rfirst, bcp_offset, R14_bcp); // first byte
-  lbz(Rdst, bcp_offset+1, R14_bcp); // second byte
-
-  // Rdst = ((Rfirst<<8) & 0xFF00) | (Rdst &~ 0xFF00)
-  rldimi(/*RA=*/Rdst, /*RS=*/Rfirst, /*sh=*/8, /*mb=*/48);
-  if (is_signed == Signed) {
-    extsh(Rdst, Rdst);
-  }
 #endif
 }
 
 void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int         bcp_offset,
                                                           Register    Rdst,
                                                           signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (bcp_offset) {
+    load_const_optimized(Rdst, bcp_offset);
+    lwbrx(Rdst, R14_bcp, Rdst);
+  } else {
+    lwbrx(Rdst, R14_bcp);
+  }
+  if (is_signed == Signed) {
+    extsw(Rdst, Rdst);
+  }
+#else
   // Read Java big endian format.
   if (bcp_offset & 3) { // Offset unaligned?
     load_const_optimized(Rdst, bcp_offset);
@@ -369,18 +386,26 @@
       lwz(Rdst, bcp_offset, R14_bcp);
     }
   }
+#endif
 }
 
+
 // Load the constant pool cache index from the bytecode stream.
 //
 // Kills / writes:
 //   - Rdst, Rscratch
 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+  // Cache index is always in the native format, courtesy of Rewriter.
   if (index_size == sizeof(u2)) {
-    get_2_byte_integer_at_bcp(bcp_offset, Rdst, Unsigned);
+    lhz(Rdst, bcp_offset, R14_bcp);
   } else if (index_size == sizeof(u4)) {
-    get_4_byte_integer_at_bcp(bcp_offset, Rdst, Signed);
+    if (bcp_offset & 3) {
+      load_const_optimized(Rdst, bcp_offset);
+      lwax(Rdst, R14_bcp, Rdst);
+    } else {
+      lwa(Rdst, bcp_offset, R14_bcp);
+    }
     assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
     nand(Rdst, Rdst, Rdst); // convert to plain index
   } else if (index_size == sizeof(u1)) {
@@ -397,6 +422,29 @@
   add(cache, R27_constPoolCache, cache);
 }
 
+// Load 4-byte signed or unsigned integer in Java format (that is, big-endian format)
+// from (Rsrc)+offset.
+void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
+                                       signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (offset) {
+    load_const_optimized(Rdst, offset);
+    lwbrx(Rdst, Rdst, Rsrc);
+  } else {
+    lwbrx(Rdst, Rsrc);
+  }
+  if (is_signed == Signed) {
+    extsw(Rdst, Rdst);
+  }
+#else
+  if (is_signed == Signed) {
+    lwa(Rdst, offset, Rsrc);
+  } else {
+    lwz(Rdst, offset, Rsrc);
+  }
+#endif
+}
+
 // Load object from cpool->resolved_references(index).
 void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index) {
   assert_different_registers(result, index);
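
Two data formats meet in this file, which explains the shape of the changes:
bytecode-stream operands are stored in big-endian Java order, so the
VM_LITTLE_ENDIAN paths fetch them with lhbrx/lwbrx, while constant-pool-cache
indices were already rewritten to native order by the Rewriter, so
get_cache_index_at_bcp can use plain lhz/lwa loads. A sketch of what get_u4
computes (assumed semantics, matching its comment above):

    #include <cstdint>

    // get_u4 sketch: big-endian fetch, sign-extended to 64 bits when the
    // operand is signed (lwa/extsw on the real paths).
    static int64_t get_u4_sketch(const uint8_t* src, bool is_signed) {
      uint32_t v = (uint32_t(src[0]) << 24) | (uint32_t(src[1]) << 16) |
                   (uint32_t(src[2]) <<  8) |  uint32_t(src[3]);
      return is_signed ? int64_t(int32_t(v)) : int64_t(v);
    }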
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -130,6 +130,7 @@
 
   void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2));
 
+  void get_u4(Register Rdst, Register Rsrc, int offset, signedOrNot is_signed);
 
   // common code
 
--- a/hotspot/src/cpu/ppc/vm/interpreterGenerator_ppc.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/interpreterGenerator_ppc.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -31,7 +31,12 @@
  private:
 
   address generate_abstract_entry(void);
-  address generate_accessor_entry(void);
+  address generate_jump_to_normal_entry(void);
+  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
   address generate_Reference_get_entry(void);
 
+  // Not supported
+  address generate_CRC32_update_entry() { return NULL; }
+  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
 #endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -428,6 +428,19 @@
   return entry;
 }
 
+
+// Call an accessor method (assuming it is resolved; otherwise drop into
+// the vanilla (slow path) entry).
+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+  address entry = __ pc();
+  address normal_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
+  assert(normal_entry != NULL, "should already be generated.");
+  __ branch_to_entry(normal_entry, R11_scratch1);
+  __ flush();
+
+  return entry;
+}
+
 // Abstract method entry.
 //
 address InterpreterGenerator::generate_abstract_entry(void) {
@@ -485,203 +498,6 @@
   return entry;
 }
 
-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry.
-address InterpreterGenerator::generate_accessor_entry(void) {
-  if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
-    return NULL;
-  }
-
-  Label Lslow_path, Lacquire;
-
-  const Register
-         Rclass_or_obj = R3_ARG1,
-         Rconst_method = R4_ARG2,
-         Rcodes        = Rconst_method,
-         Rcpool_cache  = R5_ARG3,
-         Rscratch      = R11_scratch1,
-         Rjvmti_mode   = Rscratch,
-         Roffset       = R12_scratch2,
-         Rflags        = R6_ARG4,
-         Rbtable       = R7_ARG5;
-
-  static address branch_table[number_of_states];
-
-  address entry = __ pc();
-
-  // Check for safepoint:
-  // Ditch this, real man don't need safepoint checks.
-
-  // Also check for JVMTI mode
-  // Check for null obj, take slow path if so.
-  __ ld(Rclass_or_obj, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
-  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
-  __ cmpdi(CCR1, Rclass_or_obj, 0);
-  __ cmpwi(CCR0, Rjvmti_mode, 0);
-  __ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
-  __ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0
-
-  // Do 2 things in parallel:
-  // 1. Load the index out of the first instruction word, which looks like this:
-  //    <0x2a><0xb4><index (2 byte, native endianess)>.
-  // 2. Load constant pool cache base.
-  __ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
-  __ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
-
-  __ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
-  __ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);
-
-  // Get the const pool entry by means of <index>.
-  const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
-  __ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
-  __ add(Rcpool_cache, Rscratch, Rcpool_cache);
-
-  // Check if cpool cache entry is resolved.
-  // We are resolved if the indices offset contains the current bytecode.
-  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
-  // Big Endian:
-  __ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
-  __ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
-  __ bne(CCR0, Lslow_path);
-  __ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.
-
-  // Finally, start loading the value: Get cp cache entry into regs.
-  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
-  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);
-
-  // Following code is from templateTable::getfield_or_static
-  // Load pointer to branch table
-  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
-
-  // Get volatile flag
-  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // extract volatile bit
-  // note: sync is needed before volatile load on PPC64
-
-  // Check field type
-  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
-
-#ifdef ASSERT
-  Label LFlagInvalid;
-  __ cmpldi(CCR0, Rflags, number_of_states);
-  __ bge(CCR0, LFlagInvalid);
-
-  __ ld(R9_ARG7, 0, R1_SP);
-  __ ld(R10_ARG8, 0, R21_sender_SP);
-  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-  __ asm_assert_eq("backlink", 0x543);
-#endif // ASSERT
-  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
-  // Load from branch table and dispatch (volatile case: one instruction ahead)
-  __ sldi(Rflags, Rflags, LogBytesPerWord);
-  __ cmpwi(CCR6, Rscratch, 1); // volatile?
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
-  }
-  __ ldx(Rbtable, Rbtable, Rflags);
-
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-    __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
-  }
-  __ mtctr(Rbtable);
-  __ bctr();
-
-#ifdef ASSERT
-  __ bind(LFlagInvalid);
-  __ stop("got invalid flag", 0x6541);
-
-  bool all_uninitialized = true,
-       all_initialized   = true;
-  for (int i = 0; i<number_of_states; ++i) {
-    all_uninitialized = all_uninitialized && (branch_table[i] == NULL);
-    all_initialized   = all_initialized   && (branch_table[i] != NULL);
-  }
-  assert(all_uninitialized != all_initialized, "consistency"); // either or
-
-  __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-  if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
-  if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
-  if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
-  __ stop("unexpected type", 0x6551);
-#endif
-
-  if (branch_table[itos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[itos] = __ pc(); // non-volatile_entry point
-    __ lwax(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[ltos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[ltos] = __ pc(); // non-volatile_entry point
-    __ ldx(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[btos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[btos] = __ pc(); // non-volatile_entry point
-    __ lbzx(R3_RET, Rclass_or_obj, Roffset);
-    __ extsb(R3_RET, R3_RET);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[ctos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[ctos] = __ pc(); // non-volatile_entry point
-    __ lhzx(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[stos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[stos] = __ pc(); // non-volatile_entry point
-    __ lhax(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[atos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[atos] = __ pc(); // non-volatile_entry point
-    __ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
-    __ verify_oop(R3_RET);
-    //__ dcbt(R3_RET); // prefetch
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  __ align(32, 12);
-  __ bind(Lacquire);
-  __ twi_0(R3_RET);
-  __ isync(); // acquire
-  __ blr();
-
-#ifdef ASSERT
-  for (int i = 0; i<number_of_states; ++i) {
-    assert(branch_table[i], "accessor_entry initialization");
-    //tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i]));
-  }
-#endif
-
-  __ bind(Lslow_path);
-  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
-  __ flush();
-
-  return entry;
-}
-
 // Interpreter intrinsic for WeakReference.get().
 // 1. Don't push a full blown frame and go on dispatching, but fetch the value
 //    into R8 and return quickly
@@ -713,7 +529,6 @@
   //   and so we don't need to call the G1 pre-barrier. Thus we can use the
   //   regular method entry code to generate the NPE.
   //
-  // This code is based on generate_accessor_enty.
 
   address entry = __ pc();
 
@@ -768,7 +583,7 @@
 
     return entry;
   } else {
-    return generate_accessor_entry();
+    return generate_jump_to_normal_entry();
   }
 }
 
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Thu Aug 28 14:53:43 2014 -0700
@@ -1283,8 +1283,6 @@
 
 bool MachConstantBaseNode::requires_postalloc_expand() const { return true; }
 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
-  Compile *C = ra_->C;
-
   iRegPdstOper *op_dst = new iRegPdstOper();
   MachNode *m1 = new loadToc_hiNode();
   MachNode *m2 = new loadToc_loNode();
@@ -2229,7 +2227,7 @@
 }
 /* TODO: PPC port
 // Make a new machine dependent decode node (with its operands).
-MachTypeNode *Matcher::make_decode_node(Compile *C) {
+MachTypeNode *Matcher::make_decode_node() {
   assert(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0,
          "This method is only implemented for unscaled cOops mode so far");
   MachTypeNode *decode = new decodeN_unscaledNode();
@@ -2593,7 +2591,7 @@
   MachNode        *_last;
 } loadConLNodesTuple;
 
-loadConLNodesTuple loadConLNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
+loadConLNodesTuple loadConLNodesTuple_create(PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
                                              OptoReg::Name reg_second, OptoReg::Name reg_first) {
   loadConLNodesTuple nodes;
 
@@ -2669,7 +2667,7 @@
   enc_class postalloc_expand_load_long_constant(iRegLdst dst, immL src, iRegLdst toc) %{
     // Create new nodes.
     loadConLNodesTuple loadConLNodes =
-      loadConLNodesTuple_create(C, ra_, n_toc, op_src,
+      loadConLNodesTuple_create(ra_, n_toc, op_src,
                                 ra_->get_reg_second(this), ra_->get_reg_first(this));
 
     // Push new nodes.
@@ -3391,7 +3389,7 @@
     immLOper *op_repl = new immLOper((jlong)replicate_immF(op_src->constantF()));
 
     loadConLNodesTuple loadConLNodes =
-      loadConLNodesTuple_create(C, ra_, n_toc, op_repl,
+      loadConLNodesTuple_create(ra_, n_toc, op_repl,
                                 ra_->get_reg_second(this), ra_->get_reg_first(this));
 
     // Push new nodes.
@@ -3611,7 +3609,7 @@
 
     // Create the nodes for loading the IC from the TOC.
     loadConLNodesTuple loadConLNodes_IC =
-      loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong)Universe::non_oop_word()),
+      loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong)Universe::non_oop_word()),
                                 OptoReg::Name(R19_H_num), OptoReg::Name(R19_num));
 
     // Create the call node.
@@ -3765,7 +3763,7 @@
 #if defined(ABI_ELFv2)
     jlong entry_address = (jlong) this->entry_point();
     assert(entry_address, "need address here");
-    loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper(entry_address),
+    loadConLNodes_Entry = loadConLNodesTuple_create(ra_, n_toc, new immLOper(entry_address),
                                                     OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
 #else
     // Get the struct that describes the function we are about to call.
@@ -3777,13 +3775,13 @@
     loadConLNodesTuple loadConLNodes_Toc;
 
     // Create nodes and operands for loading the entry point.
-    loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper(entry_address),
+    loadConLNodes_Entry = loadConLNodesTuple_create(ra_, n_toc, new immLOper(entry_address),
                                                     OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
 
 
     // Create nodes and operands for loading the env pointer.
     if (fd->env() != NULL) {
-      loadConLNodes_Env = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong) fd->env()),
+      loadConLNodes_Env = loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) fd->env()),
                                                     OptoReg::Name(R11_H_num), OptoReg::Name(R11_num));
     } else {
       loadConLNodes_Env._large_hi = NULL;
@@ -3796,7 +3794,7 @@
     }
 
     // Create nodes and operands for loading the Toc point.
-    loadConLNodes_Toc = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong) fd->toc()),
+    loadConLNodes_Toc = loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) fd->toc()),
                                                   OptoReg::Name(R2_H_num), OptoReg::Name(R2_num));
 #endif // ABI_ELFv2
     // mtctr node
--- a/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -30,7 +30,6 @@
   address generate_normal_entry(bool synchronized);
   address generate_native_entry(bool synchronized);
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_empty_entry(void);
 
   void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
   void unlock_method(bool check_exceptions = true);
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -176,8 +176,12 @@
   const Register size  = R12_scratch2;
   __ get_cache_and_index_at_bcp(cache, 1, index_size);
 
-  // Big Endian (get least significant byte of 64 bit value):
+  // Get least significant byte of 64 bit value:
+#if defined(VM_LITTLE_ENDIAN)
+  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
+#else
   __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
+#endif
   __ sldi(size, size, Interpreter::logStackElementSize);
   __ add(R15_esp, R15_esp, size);
   __ dispatch_next(state, step);
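
The +0 versus +7 offsets above (and the 1 + byte_no versus 7 - (1 + byte_no)
variants in templateTable_ppc_64.cpp below) are all instances of one rule:
byte k, counting from the least significant end of an 8-byte field, lives at
offset k on a little-endian machine and at offset 7 - k on a big-endian one.
A one-liner stating that rule:

    // Offset of byte k (counting from the least significant byte) of an
    // 8-byte field, by layout. The LSB (k = 0) is at offset 0 on LE and
    // offset 7 on BE, matching the flags_offset() loads above.
    static int byte_offset_in_u8(int k, bool little_endian) {
      return little_endian ? k : 7 - k;
    }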
@@ -598,48 +602,6 @@
 
 // End of helpers
 
-// ============================================================================
-// Various method entries
-//
-
-// Empty method, generate a very fast return. We must skip this entry if
-// someone's debugging, indicated by the flag
-// "interp_mode" in the Thread obj.
-// Note: empty methods are generated mostly methods that do assertions, which are
-// disabled in the "java opt build".
-address TemplateInterpreterGenerator::generate_empty_entry(void) {
-  if (!UseFastEmptyMethods) {
-    NOT_PRODUCT(__ should_not_reach_here();)
-    return Interpreter::entry_for_kind(Interpreter::zerolocals);
-  }
-
-  Label Lslow_path;
-  const Register Rjvmti_mode = R11_scratch1;
-  address entry = __ pc();
-
-  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
-  __ cmpwi(CCR0, Rjvmti_mode, 0);
-  __ bne(CCR0, Lslow_path); // jvmti_mode!=0
-
-  // Noone's debuggin: Simply return.
-  // Pop c2i arguments (if any) off when we return.
-#ifdef ASSERT
-    __ ld(R9_ARG7, 0, R1_SP);
-    __ ld(R10_ARG8, 0, R21_sender_SP);
-    __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-    __ asm_assert_eq("backlink", 0x545);
-#endif // ASSERT
-  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
-  // And we're done.
-  __ blr();
-
-  __ bind(Lslow_path);
-  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
-  __ flush();
-
-  return entry;
-}
 
 // Support abs and sqrt like in compiler.
 // For others we can use a normal (native) entry.
@@ -858,7 +820,9 @@
   // Our signature handlers copy required arguments to the C stack
   // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
   __ mr(R3_ARG1, R18_locals);
+#if !defined(ABI_ELFv2)
   __ ld(signature_handler_fd, 0, signature_handler_fd);
+#endif
 
   __ call_stub(signature_handler_fd);
 
@@ -1020,8 +984,13 @@
   // native result across the call. No oop is present.
 
   __ mr(R3_ARG1, R16_thread);
+#if defined(ABI_ELFv2)
+  __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+            relocInfo::none);
+#else
   __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
             relocInfo::none);
+#endif
 
   __ bind(sync_check_done);
 
@@ -1278,45 +1247,6 @@
   return entry;
 }
 
-// =============================================================================
-// Entry points
-
-address AbstractInterpreterGenerator::generate_method_entry(
-                                        AbstractInterpreter::MethodKind kind) {
-  // Determine code generation flags.
-  bool synchronized = false;
-  address entry_point = NULL;
-
-  switch (kind) {
-  case Interpreter::zerolocals             :                                                                             break;
-  case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
-  case Interpreter::native                 : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
-  case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true);  break;
-  case Interpreter::empty                  : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();       break;
-  case Interpreter::accessor               : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();    break;
-  case Interpreter::abstract               : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();    break;
-
-  case Interpreter::java_lang_math_sin     : // fall thru
-  case Interpreter::java_lang_math_cos     : // fall thru
-  case Interpreter::java_lang_math_tan     : // fall thru
-  case Interpreter::java_lang_math_abs     : // fall thru
-  case Interpreter::java_lang_math_log     : // fall thru
-  case Interpreter::java_lang_math_log10   : // fall thru
-  case Interpreter::java_lang_math_sqrt    : // fall thru
-  case Interpreter::java_lang_math_pow     : // fall thru
-  case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);    break;
-  case Interpreter::java_lang_ref_reference_get
-                                           : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-  default                                  : ShouldNotReachHere();                                                       break;
-  }
-
-  if (entry_point) {
-    return entry_point;
-  }
-
-  return ((InterpreterGenerator*) this)->generate_normal_entry(synchronized);
-}
-
 // These should never be compiled since the interpreter will prefer
 // the compiled version to the intrinsic version.
 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
@@ -1344,7 +1274,7 @@
                                          int callee_locals,
                                          bool is_top_frame) {
   // Note: This calculation must exactly parallel the frame setup
-  // in AbstractInterpreterGenerator::generate_method_entry.
+  // in InterpreterGenerator::generate_fixed_frame.
   assert(Interpreter::stackElementWords == 1, "sanity");
   const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
   const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -189,8 +189,12 @@
       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
       __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
-      // Big Endian: ((*(cache+indices))>>((1+byte_no)*8))&0xFF
+      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
+#if defined(VM_LITTLE_ENDIAN)
+      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
+#else
       __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
+#endif
       __ cmpwi(CCR0, Rnew_bc, 0);
       __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
       __ beq(CCR0, L_patch_done);
@@ -1839,8 +1843,8 @@
   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
 
   // Load lo & hi.
-  __ lwz(Rlow_byte, BytesPerInt, Rdef_offset_addr);
-  __ lwz(Rhigh_byte, BytesPerInt * 2, Rdef_offset_addr);
+  __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
+  __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
 
   // Check for default case (=index outside [low,high]).
   __ cmpw(CCR0, R17_tos, Rlow_byte);
@@ -1854,12 +1858,17 @@
   __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
   __ sldi(Rindex, Rindex, LogBytesPerInt);
   __ addi(Rindex, Rindex, 3 * BytesPerInt);
+#if defined(VM_LITTLE_ENDIAN)
+  __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
+  __ extsw(Roffset, Roffset);
+#else
   __ lwax(Roffset, Rdef_offset_addr, Rindex);
+#endif
   __ b(Ldispatch);
 
   __ bind(Ldefault_case);
   __ profile_switch_default(Rhigh_byte, Rscratch1);
-  __ lwa(Roffset, 0, Rdef_offset_addr);
+  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
 
   __ bind(Ldispatch);
 
@@ -1875,12 +1884,11 @@
 // Table switch using linear search through cases.
 // Bytecode stream format:
 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
-// Note: Everything is big-endian format here. So on little endian machines, we have to revers offset and count and cmp value.
+// Note: Everything is big-endian format here.
 void TemplateTable::fast_linearswitch() {
   transition(itos, vtos);
 
-  Label Lloop_entry, Lsearch_loop, Lfound, Lcontinue_execution, Ldefault_case;
-
+  Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
   Register Rcount           = R3_ARG1,
            Rcurrent_pair    = R4_ARG2,
            Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
@@ -1894,47 +1902,40 @@
   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
 
   // Setup loop counter and limit.
-  __ lwz(Rcount, BytesPerInt, Rdef_offset_addr);    // Load count.
+  __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
   __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
 
-  // Set up search loop.
-  __ cmpwi(CCR0, Rcount, 0);
-  __ beq(CCR0, Ldefault_case);
-
   __ mtctr(Rcount);
-
-  // linear table search
-  __ bind(Lsearch_loop);
-
-  __ lwz(Rvalue, 0, Rcurrent_pair);
-  __ lwa(Roffset, 1 * BytesPerInt, Rcurrent_pair);
-
-  __ cmpw(CCR0, Rvalue, Rcmp_value);
-  __ beq(CCR0, Lfound);
-
-  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
-  __ bdnz(Lsearch_loop);
-
-  // default case
+  __ cmpwi(CCR0, Rcount, 0);
+  __ bne(CCR0, Lloop_entry);
+
+  // Default case
   __ bind(Ldefault_case);
-
-  __ lwa(Roffset, 0, Rdef_offset_addr);
+  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
   if (ProfileInterpreter) {
     __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
-    __ b(Lcontinue_execution);
   }
-
-  // Entry found, skip Roffset bytecodes and continue.
-  __ bind(Lfound);
+  __ b(Lcontinue_execution);
+
+  // Next iteration
+  __ bind(Lsearch_loop);
+  __ bdz(Ldefault_case);
+  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
+  __ bind(Lloop_entry);
+  __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
+  __ cmpw(CCR0, Rvalue, Rcmp_value);
+  __ bne(CCR0, Lsearch_loop);
+
+  // Found, load offset.
+  __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
+  // Calculate case index and profile
+  __ mfctr(Rcurrent_pair);
   if (ProfileInterpreter) {
-    // Calc the num of the pair we hit. Careful, Rcurrent_pair points 2 ints
-    // beyond the actual current pair due to the auto update load above!
-    __ sub(Rcurrent_pair, Rcurrent_pair, Rdef_offset_addr);
-    __ addi(Rcurrent_pair, Rcurrent_pair, - 2 * BytesPerInt);
-    __ srdi(Rcurrent_pair, Rcurrent_pair, LogBytesPerInt + 1);
+    __ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
     __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
-    __ bind(Lcontinue_execution);
   }
+
+  __ bind(Lcontinue_execution);
   __ add(R14_bcp, Roffset, R14_bcp);
   __ dispatch_next(vtos);
 }
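
The restructured loop keeps the pair counter in CTR (mtctr/bdz) and recovers
the matched case index afterwards with mfctr: CTR starts at count and is
decremented once per advance, so the index of the matching pair is
count - ctr. A control-flow sketch in C++ (names illustrative; the
byte-order handling that get_u4 performs is elided):

    #include <cstdint>

    // Sketch of the new fast_linearswitch control flow.
    static int32_t linearswitch(const int32_t* pairs, int32_t count,
                                int32_t key, int32_t default_offset,
                                int32_t* case_index) {
      int32_t ctr = count;                    // mtctr(Rcount)
      if (ctr != 0) {                         // cmpwi; bne Lloop_entry
        for (;;) {                            // Lloop_entry
          const int32_t* pair = pairs + 2 * (count - ctr);
          if (pair[0] == key) {               // cmpw(Rvalue, Rcmp_value)
            *case_index = count - ctr;        // mfctr; sub(Rcount, ctr)
            return pair[1];                   // matched offset
          }
          if (--ctr == 0) break;              // bdz Ldefault_case
        }
      }
      *case_index = -1;                       // Ldefault_case
      return default_offset;
    }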
@@ -1990,7 +1991,7 @@
 
   // initialize i & j
   __ li(Ri,0);
-  __ lwz(Rj, -BytesPerInt, Rarray);
+  __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
 
   // and start.
   Label entry;
@@ -2007,7 +2008,11 @@
     //   i = h;
     // }
     __ sldi(Rscratch, Rh, log_entry_size);
+#if defined(VM_LITTLE_ENDIAN)
+    __ lwbrx(Rscratch, Rscratch, Rarray);
+#else
     __ lwzx(Rscratch, Rscratch, Rarray);
+#endif
 
     // if (key < current value)
     //   Rh = Rj
@@ -2039,20 +2044,20 @@
   // Ri = value offset
   __ sldi(Ri, Ri, log_entry_size);
   __ add(Ri, Ri, Rarray);
-  __ lwz(Rscratch, 0, Ri);
+  __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
 
   Label not_found;
   // Ri = offset offset
   __ cmpw(CCR0, Rkey, Rscratch);
   __ beq(CCR0, not_found);
   // entry not found -> j = default offset
-  __ lwz(Rj, -2 * BytesPerInt, Rarray);
+  __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
   __ b(default_case);
 
   __ bind(not_found);
   // entry found -> j = offset
   __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
-  __ lwz(Rj, BytesPerInt, Ri);
+  __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
 
   if (ProfileInterpreter) {
     __ b(continue_execution);
@@ -2147,8 +2152,11 @@
 
   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
   // We are resolved if the indices offset contains the current bytecode.
-  // Big Endian:
+#if defined(VM_LITTLE_ENDIAN)
+  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
+#else
   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
+#endif
   // Acquire by cmp-br-isync (see below).
   __ cmpdi(CCR0, Rscratch, (int)bytecode());
   __ beq(CCR0, Lresolved);
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -29,6 +29,7 @@
 #include "compiler/disassembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/java.hpp"
+#include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/defaultStream.hpp"
 #include "vm_version_ppc.hpp"
@@ -108,7 +109,7 @@
                (has_vand()    ? " vand"    : "")
                // Make sure number of %s matches num_features!
               );
-  _features_str = strdup(buf);
+  _features_str = os::strdup(buf);
   NOT_PRODUCT(if (Verbose) print_features(););
 
   // PPC64 supports 8-byte compare-exchange operations (see
--- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
 #include "oops/method.hpp"
@@ -68,9 +69,7 @@
 #define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
 #define __ _masm->
 
-Label frame_manager_entry;
-Label fast_accessor_slow_entry_path;  // fast accessor methods need to be able to jmp to unsynchronized
-                                      // c++ interpreter entry point this holds that entry point label.
+Label frame_manager_entry; // C++ interpreter entry point; this label holds that entry point.
 
 static address unctrap_frame_manager_entry  = NULL;
 
@@ -452,110 +451,6 @@
   return NULL;
 }
 
-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry
-
-// Generates code to elide accessor methods
-// Uses G3_scratch and G1_scratch as scratch
-address InterpreterGenerator::generate_accessor_entry(void) {
-
-  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
-  // parameter size = 1
-  // Note: We can only use this code if the getfield has been resolved
-  //       and if we don't have a null-pointer exception => check for
-  //       these conditions first and use slow path if necessary.
-  address entry = __ pc();
-  Label slow_path;
-
-  if ( UseFastAccessorMethods) {
-    // Check if we need to reach a safepoint and generate full interpreter
-    // frame if so.
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
-    __ load_contents(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
-
-    // Check if local 0 != NULL
-    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
-    __ tst(Otos_i);  // check if local 0 == NULL and go the slow path
-    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
-
-
-    // read first instruction word and extract bytecode @ 1 and index @ 2
-    // get first 4 bytes of the bytecodes (big endian!)
-    __ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), G1_scratch);
-    __ ld(Address(G1_scratch, in_bytes(ConstMethod::codes_offset())), G1_scratch);
-
-    // move index @ 2 far left then to the right most two bytes.
-    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
-    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
-                      ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
-
-    // get constant pool cache
-    __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G3_scratch);
-    __ ld_ptr(G3_scratch, in_bytes(ConstMethod::constants_offset()), G3_scratch);
-    __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
-
-    // get specific constant pool cache entry
-    __ add(G3_scratch, G1_scratch, G3_scratch);
-
-    // Check the constant Pool cache entry to see if it has been resolved.
-    // If not, need the slow path.
-    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
-    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch);
-    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
-    __ and3(G1_scratch, 0xFF, G1_scratch);
-    __ cmp(G1_scratch, Bytecodes::_getfield);
-    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
-
-    // Get the type and return field offset from the constant pool cache
-    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch);
-    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch);
-
-    Label xreturn_path;
-    // Need to differentiate between igetfield, agetfield, bgetfield etc.
-    // because they are different sizes.
-    // Get the type from the constant pool cache
-    __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
-    // Make sure we don't need to mask G1_scratch after the above shift
-    ConstantPoolCacheEntry::verify_tos_state_shift();
-    __ cmp(G1_scratch, atos );
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, itos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, stos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, ctos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
-#ifdef ASSERT
-    __ cmp(G1_scratch, btos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
-    __ should_not_reach_here();
-#endif
-    __ ldsb(Otos_i, G3_scratch, Otos_i);
-    __ bind(xreturn_path);
-
-    // _ireturn/_areturn
-    __ retl();                      // return from leaf routine
-    __ delayed()->mov(O5_savedSP, SP);
-
-    // Generate regular method entry
-    __ bind(slow_path);
-    __ ba(fast_accessor_slow_entry_path);
-    __ delayed()->nop();
-    return entry;
-  }
-  return NULL;
-}
-
 address InterpreterGenerator::generate_Reference_get_entry(void) {
 #if INCLUDE_ALL_GCS
   if (UseG1GC) {
@@ -573,7 +468,7 @@
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_accessor_entry();
+  return generate_jump_to_normal_entry();
 }
 
 //
@@ -1870,23 +1765,6 @@
   __ ba(call_interpreter_2);
   __ delayed()->st_ptr(O1, STATE(_stack));
 
-
-  // Fast accessor methods share this entry point.
-  // This works because frame manager is in the same codelet
-  // This can either be an entry via call_stub/c1/c2 or a recursive interpreter call
-  // we need to do a little register fixup here once we distinguish the two of them
-  if (UseFastAccessorMethods && !synchronized) {
-  // Call stub_return address still in O7
-    __ bind(fast_accessor_slow_entry_path);
-    __ set((intptr_t)return_from_native_method - 8, Gtmp1);
-    __ cmp(Gtmp1, O7);                                                // returning to interpreter?
-    __ brx(Assembler::equal, true, Assembler::pt, re_dispatch);       // yep
-    __ delayed()->nop();
-    __ ba(re_dispatch);
-    __ delayed()->mov(G0, prevState);                                 // initial entry
-
-  }
-
   // interpreter returning to native code (call_stub/c1/c2)
   // convert result and unwind initial activation
   // L2_scratch - scaled result type index
--- a/hotspot/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,11 @@
   address generate_normal_entry(bool synchronized);
   address generate_native_entry(bool synchronized);
   address generate_abstract_entry(void);
-  address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_empty_entry(void);
-  address generate_accessor_entry(void);
+  // there are no math intrinsics on sparc
+  address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
+  address generate_jump_to_normal_entry(void);
+  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
   address generate_Reference_get_entry(void);
   void lock_method(void);
   void save_native_result(void);
@@ -43,4 +45,7 @@
   void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
   void generate_counter_overflow(Label& Lcontinue);
 
+  // Not supported
+  address generate_CRC32_update_entry() { return NULL; }
+  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
 #endif // CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -241,6 +241,15 @@
 
 // Various method entries
 
+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+  address entry = __ pc();
+  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
+  AddressLiteral al(Interpreter::entry_for_kind(Interpreter::zerolocals));
+  __ jump_to(al, G3_scratch);
+  __ delayed()->nop();
+  return entry;
+}
+
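(A hedged illustration of the trampoline just added: the expansion below is the assumed behaviour of jump_to/delayed on SPARC, not something spelled out in this hunk. The stub materializes the zerolocals entry address into G3_scratch and jumps through it, with a nop filling the branch delay slot:

    ! assumed expansion, illustrative only
    sethi %hi(zerolocals_entry), %g3
    jmp   %g3 + %lo(zerolocals_entry)
    nop                                ! SPARC branch delay slot
)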
 // Abstract method entry
 // Attempt to execute abstract method. Throw exception
 //
@@ -255,159 +264,6 @@
 
 }
 
-
-//----------------------------------------------------------------------------------------------------
-// Entry points & stack frame layout
-//
-// Here we generate the various kind of entries into the interpreter.
-// The two main entry type are generic bytecode methods and native call method.
-// These both come in synchronized and non-synchronized versions but the
-// frame layout they create is very similar. The other method entry
-// types are really just special purpose entries that are really entry
-// and interpretation all in one. These are for trivial methods like
-// accessor, empty, or special math methods.
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// C2 Calling Conventions:
-//
-// The entry code below assumes that the following registers are set
-// when coming in:
-//    G5_method: holds the Method* of the method to call
-//    Lesp:    points to the TOS of the callers expression stack
-//             after having pushed all the parameters
-//
-// The entry code does the following to setup an interpreter frame
-//   pop parameters from the callers stack by adjusting Lesp
-//   set O0 to Lesp
-//   compute X = (max_locals - num_parameters)
-//   bump SP up by X to accomadate the extra locals
-//   compute X = max_expression_stack
-//               + vm_local_words
-//               + 16 words of register save area
-//   save frame doing a save sp, -X, sp growing towards lower addresses
-//   set Lbcp, Lmethod, LcpoolCache
-//   set Llocals to i0
-//   set Lmonitors to FP - rounded_vm_local_words
-//   set Lesp to Lmonitors - 4
-//
-//  The frame has now been setup to do the rest of the entry code
-
-// Try this optimization:  Most method entries could live in a
-// "one size fits all" stack frame without all the dynamic size
-// calculations.  It might be profitable to do all this calculation
-// statically and approximately for "small enough" methods.
-
-//-----------------------------------------------------------------------------------------------
-
-// C1 Calling conventions
-//
-// Upon method entry, the following registers are setup:
-//
-// g2 G2_thread: current thread
-// g5 G5_method: method to activate
-// g4 Gargs  : pointer to last argument
-//
-//
-// Stack:
-//
-// +---------------+ <--- sp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- sp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- sp + 0x5c
-// |               |
-// :     free      :
-// |               |
-// +---------------+ <--- Gargs
-// |               |
-// :   arguments   :
-// |               |
-// +---------------+
-// |               |
-//
-//
-//
-// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
-//
-// +---------------+ <--- sp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- sp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- sp + 0x5c
-// |               |
-// :               :
-// |               | <--- Lesp
-// +---------------+ <--- Lmonitors (fp - 0x18)
-// |   VM locals   |
-// +---------------+ <--- fp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- fp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- fp + 0x5c
-// |               |
-// :     free      :
-// |               |
-// +---------------+
-// |               |
-// : nonarg locals :
-// |               |
-// +---------------+
-// |               |
-// :   arguments   :
-// |               | <--- Llocals
-// +---------------+ <--- Gargs
-// |               |
-
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
-  // determine code generation flags
-  bool synchronized = false;
-  address entry_point = NULL;
-
-  switch (kind) {
-    case Interpreter::zerolocals             :                                                                             break;
-    case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
-    case Interpreter::native                 : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false);  break;
-    case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true);   break;
-    case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();        break;
-    case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();     break;
-    case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();     break;
-
-    case Interpreter::java_lang_math_sin     :                                                                             break;
-    case Interpreter::java_lang_math_cos     :                                                                             break;
-    case Interpreter::java_lang_math_tan     :                                                                             break;
-    case Interpreter::java_lang_math_sqrt    :                                                                             break;
-    case Interpreter::java_lang_math_abs     :                                                                             break;
-    case Interpreter::java_lang_math_log     :                                                                             break;
-    case Interpreter::java_lang_math_log10   :                                                                             break;
-    case Interpreter::java_lang_math_pow     :                                                                             break;
-    case Interpreter::java_lang_math_exp     :                                                                             break;
-    case Interpreter::java_lang_ref_reference_get
-                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-    default:
-      fatal(err_msg("unexpected method kind: %d", kind));
-      break;
-  }
-
-  if (entry_point) return entry_point;
-
-  return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
-}
-
-
 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
   // No special entry points that preclude compilation
   return true;
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Thu Aug 28 14:53:43 2014 -0700
@@ -6184,7 +6184,11 @@
   ins_cost(DEFAULT_COST * 3/2);
   format %{ "SET    $con,$dst\t! non-oop ptr" %}
   ins_encode %{
-    __ set($con$$constant, $dst$$Register);
+    if (_opnds[1]->constant_reloc() == relocInfo::metadata_type) {
+      __ set_metadata_constant((Metadata*)$con$$constant, $dst$$Register);
+    } else {
+      __ set($con$$constant, $dst$$Register);
+    }
   %}
   ins_pipe(loadConP);
 %}
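(The new branch matters because a Metadata* embedded as a bare immediate would carry no relocation information. A minimal sketch of the assumed shape of the helper — names taken from the SPARC macro assembler, implementation not part of this hunk:

    // Hedged sketch -- assumed implementation, not quoted from the patch.
    void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
      assert(oop_recorder() != NULL, "this assembler needs a metadata recorder");
      // constant_metadata_address() records obj and returns an AddressLiteral
      // carrying a metadata relocation, so the sethi/add emitted below can be
      // patched if the code is relocated.
      AddressLiteral obj_addr = constant_metadata_address(obj);
      patchable_set(obj_addr, d);
    }
)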
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -456,6 +456,115 @@
 // Generate a fixed interpreter frame. This is identical setup for interpreted
 // methods and for native methods hence the shared code.
 
+
+//----------------------------------------------------------------------------------------------------
+// Stack frame layout
+//
+// When control flow reaches any of the entry types for the interpreter
+// the following holds ->
+//
+// C2 Calling Conventions:
+//
+// The entry code below assumes that the following registers are set
+// when coming in:
+//    G5_method: holds the Method* of the method to call
+//    Lesp:    points to the TOS of the caller's expression stack
+//             after having pushed all the parameters
+//
+// The entry code does the following to set up an interpreter frame
+//   pop parameters from the caller's stack by adjusting Lesp
+//   set O0 to Lesp
+//   compute X = (max_locals - num_parameters)
+//   bump SP up by X to accommodate the extra locals
+//   compute X = max_expression_stack
+//               + vm_local_words
+//               + 16 words of register save area
+//   save frame doing a save sp, -X, sp growing towards lower addresses
+//   set Lbcp, Lmethod, LcpoolCache
+//   set Llocals to i0
+//   set Lmonitors to FP - rounded_vm_local_words
+//   set Lesp to Lmonitors - 4
+//
+//  The frame has now been set up to do the rest of the entry code
+
+// Try this optimization:  Most method entries could live in a
+// "one size fits all" stack frame without all the dynamic size
+// calculations.  It might be profitable to do all this calculation
+// statically and approximately for "small enough" methods.
+
+//-----------------------------------------------------------------------------------------------
+
+// C1 Calling conventions
+//
+// Upon method entry, the following registers are set up:
+//
+// g2 G2_thread: current thread
+// g5 G5_method: method to activate
+// g4 Gargs  : pointer to last argument
+//
+//
+// Stack:
+//
+// +---------------+ <--- sp
+// |               |
+// : reg save area :
+// |               |
+// +---------------+ <--- sp + 0x40
+// |               |
+// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+// |               |
+// +---------------+ <--- sp + 0x5c
+// |               |
+// :     free      :
+// |               |
+// +---------------+ <--- Gargs
+// |               |
+// :   arguments   :
+// |               |
+// +---------------+
+// |               |
+//
+//
+//
+// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
+//
+// +---------------+ <--- sp
+// |               |
+// : reg save area :
+// |               |
+// +---------------+ <--- sp + 0x40
+// |               |
+// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+// |               |
+// +---------------+ <--- sp + 0x5c
+// |               |
+// :               :
+// |               | <--- Lesp
+// +---------------+ <--- Lmonitors (fp - 0x18)
+// |   VM locals   |
+// +---------------+ <--- fp
+// |               |
+// : reg save area :
+// |               |
+// +---------------+ <--- fp + 0x40
+// |               |
+// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+// |               |
+// +---------------+ <--- fp + 0x5c
+// |               |
+// :     free      :
+// |               |
+// +---------------+
+// |               |
+// : nonarg locals :
+// |               |
+// +---------------+
+// |               |
+// :   arguments   :
+// |               | <--- Llocals
+// +---------------+ <--- Gargs
+// |               |
+
 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
   //
   //
@@ -599,136 +708,6 @@
 
 }
 
-// Empty method, generate a very fast return.
-
-address InterpreterGenerator::generate_empty_entry(void) {
-
-  // A method that does nother but return...
-
-  address entry = __ pc();
-  Label slow_path;
-
-  // do nothing for empty methods (do not even increment invocation counter)
-  if ( UseFastEmptyMethods) {
-    // If we need a safepoint check, generate full interpreter entry.
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
-    __ set(sync_state, G3_scratch);
-    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
-
-    // Code: _return
-    __ retl();
-    __ delayed()->mov(O5_savedSP, SP);
-
-    __ bind(slow_path);
-    (void) generate_normal_entry(false);
-
-    return entry;
-  }
-  return NULL;
-}
-
-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry
-
-// Generates code to elide accessor methods
-// Uses G3_scratch and G1_scratch as scratch
-address InterpreterGenerator::generate_accessor_entry(void) {
-
-  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
-  // parameter size = 1
-  // Note: We can only use this code if the getfield has been resolved
-  //       and if we don't have a null-pointer exception => check for
-  //       these conditions first and use slow path if necessary.
-  address entry = __ pc();
-  Label slow_path;
-
-
-  // XXX: for compressed oops pointer loading and decoding doesn't fit in
-  // delay slot and damages G1
-  if ( UseFastAccessorMethods && !UseCompressedOops ) {
-    // Check if we need to reach a safepoint and generate full interpreter
-    // frame if so.
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
-    __ load_contents(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
-
-    // Check if local 0 != NULL
-    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
-    // check if local 0 == NULL and go the slow path
-    __ br_null_short(Otos_i, Assembler::pn, slow_path);
-
-
-    // read first instruction word and extract bytecode @ 1 and index @ 2
-    // get first 4 bytes of the bytecodes (big endian!)
-    __ ld_ptr(G5_method, Method::const_offset(), G1_scratch);
-    __ ld(G1_scratch, ConstMethod::codes_offset(), G1_scratch);
-
-    // move index @ 2 far left then to the right most two bytes.
-    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
-    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
-                      ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
-
-    // get constant pool cache
-    __ ld_ptr(G5_method, Method::const_offset(), G3_scratch);
-    __ ld_ptr(G3_scratch, ConstMethod::constants_offset(), G3_scratch);
-    __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
-
-    // get specific constant pool cache entry
-    __ add(G3_scratch, G1_scratch, G3_scratch);
-
-    // Check the constant Pool cache entry to see if it has been resolved.
-    // If not, need the slow path.
-    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
-    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
-    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
-    __ and3(G1_scratch, 0xFF, G1_scratch);
-    __ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);
-
-    // Get the type and return field offset from the constant pool cache
-    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
-    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
-
-    Label xreturn_path;
-    // Need to differentiate between igetfield, agetfield, bgetfield etc.
-    // because they are different sizes.
-    // Get the type from the constant pool cache
-    __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
-    // Make sure we don't need to mask G1_scratch after the above shift
-    ConstantPoolCacheEntry::verify_tos_state_shift();
-    __ cmp(G1_scratch, atos );
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, itos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, stos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, ctos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
-#ifdef ASSERT
-    __ cmp(G1_scratch, btos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
-    __ should_not_reach_here();
-#endif
-    __ ldsb(Otos_i, G3_scratch, Otos_i);
-    __ bind(xreturn_path);
-
-    // _ireturn/_areturn
-    __ retl();                      // return from leaf routine
-    __ delayed()->mov(O5_savedSP, SP);
-
-    // Generate regular method entry
-    __ bind(slow_path);
-    (void) generate_normal_entry(false);
-    return entry;
-  }
-  return NULL;
-}
-
 // Method entry for java.lang.ref.Reference.get.
 address InterpreterGenerator::generate_Reference_get_entry(void) {
 #if INCLUDE_ALL_GCS
@@ -806,7 +785,7 @@
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_accessor_entry();
+  return generate_jump_to_normal_entry();
 }
 
 //
@@ -1242,8 +1221,6 @@
 
 
 // Generic method entry to (asm) interpreter
-//------------------------------------------------------------------------------------------------------------------------
-//
 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   address entry = __ pc();
 
@@ -1410,123 +1387,6 @@
   return entry;
 }
 
-
-//----------------------------------------------------------------------------------------------------
-// Entry points & stack frame layout
-//
-// Here we generate the various kind of entries into the interpreter.
-// The two main entry type are generic bytecode methods and native call method.
-// These both come in synchronized and non-synchronized versions but the
-// frame layout they create is very similar. The other method entry
-// types are really just special purpose entries that are really entry
-// and interpretation all in one. These are for trivial methods like
-// accessor, empty, or special math methods.
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// C2 Calling Conventions:
-//
-// The entry code below assumes that the following registers are set
-// when coming in:
-//    G5_method: holds the Method* of the method to call
-//    Lesp:    points to the TOS of the callers expression stack
-//             after having pushed all the parameters
-//
-// The entry code does the following to setup an interpreter frame
-//   pop parameters from the callers stack by adjusting Lesp
-//   set O0 to Lesp
-//   compute X = (max_locals - num_parameters)
-//   bump SP up by X to accomadate the extra locals
-//   compute X = max_expression_stack
-//               + vm_local_words
-//               + 16 words of register save area
-//   save frame doing a save sp, -X, sp growing towards lower addresses
-//   set Lbcp, Lmethod, LcpoolCache
-//   set Llocals to i0
-//   set Lmonitors to FP - rounded_vm_local_words
-//   set Lesp to Lmonitors - 4
-//
-//  The frame has now been setup to do the rest of the entry code
-
-// Try this optimization:  Most method entries could live in a
-// "one size fits all" stack frame without all the dynamic size
-// calculations.  It might be profitable to do all this calculation
-// statically and approximately for "small enough" methods.
-
-//-----------------------------------------------------------------------------------------------
-
-// C1 Calling conventions
-//
-// Upon method entry, the following registers are setup:
-//
-// g2 G2_thread: current thread
-// g5 G5_method: method to activate
-// g4 Gargs  : pointer to last argument
-//
-//
-// Stack:
-//
-// +---------------+ <--- sp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- sp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- sp + 0x5c
-// |               |
-// :     free      :
-// |               |
-// +---------------+ <--- Gargs
-// |               |
-// :   arguments   :
-// |               |
-// +---------------+
-// |               |
-//
-//
-//
-// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
-//
-// +---------------+ <--- sp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- sp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- sp + 0x5c
-// |               |
-// :               :
-// |               | <--- Lesp
-// +---------------+ <--- Lmonitors (fp - 0x18)
-// |   VM locals   |
-// +---------------+ <--- fp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- fp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- fp + 0x5c
-// |               |
-// :     free      :
-// |               |
-// +---------------+
-// |               |
-// : nonarg locals :
-// |               |
-// +---------------+
-// |               |
-// :   arguments   :
-// |               | <--- Llocals
-// +---------------+ <--- Gargs
-// |               |
-
 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
 
   // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -26,6 +26,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/java.hpp"
+#include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "vm_version_sparc.hpp"
 
@@ -249,7 +250,7 @@
                (!has_hardware_fsmuld() ? ", no-fsmuld" : ""));
 
   // buf is started with ", " or is empty
-  _features_str = strdup(strlen(buf) > 2 ? buf + 2 : buf);
+  _features_str = os::strdup(strlen(buf) > 2 ? buf + 2 : buf);
 
   // There are three 64-bit SPARC families that do not overlap, e.g.,
   // both is_ultra3() and is_sparc64() cannot be true at the same time.
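(The switch from libc strdup to os::strdup fits the native-memory-tracking rework in this merge: allocations routed through os::malloc are attributed by NMT, while a libc strdup would be invisible to it. A hedged sketch of the assumed shape of the wrapper — the flags parameter is presumably defaulted, since the call site passes only the string:

    // Assumed shape -- illustrative, not quoted from the patch.
    char* os::strdup(const char* str, MEMFLAGS flags) {
      size_t size = strlen(str) + 1;
      char* dup_str = (char*)os::malloc(size, flags);   // NMT-tracked
      if (dup_str == NULL) return NULL;
      strcpy(dup_str, str);
      return dup_str;
    }
)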
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -3854,6 +3854,15 @@
 }
 
 // Carry-Less Multiplication Quadword
+void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
+  assert(VM_Version::supports_clmul(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
+  emit_int8(0x44);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8((unsigned char)mask);
+}
+
+// Carry-Less Multiplication Quadword
 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
   assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
   bool vector256 = false;
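(Encoding note for the new SSE form — a standard instruction-set fact rather than something stated in the patch: PCLMULQDQ is 66 0F 3A 44 /r ib, which is exactly what the emitter produces. simd_prefix_and_encode() supplies the 66 0F 3A prefix and escape bytes, 0x44 is the opcode, 0xC0 | encode is the register-register ModRM byte, and mask is the trailing immediate. For example, pclmulqdq(xmm2, xmm1, 0x11) should assemble, in the legacy encoding, to:

    66 0f 3a 44 d1 11        pclmulqdq $0x11, %xmm1, %xmm2
)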
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1837,6 +1837,7 @@
   void vpbroadcastd(XMMRegister dst, XMMRegister src);
 
   // Carry-Less Multiplication Quadword
+  void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
 
   // AVX instruction which is used to clear upper 128 bits of YMM registers and
--- a/hotspot/src/cpu/x86/vm/cppInterpreterGenerator_x86.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/cppInterpreterGenerator_x86.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,21 +27,6 @@
 
  protected:
 
-#if 0
-  address generate_asm_interpreter_entry(bool synchronized);
-  address generate_native_entry(bool synchronized);
-  address generate_abstract_entry(void);
-  address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_empty_entry(void);
-  address generate_accessor_entry(void);
-  address generate_Reference_get_entry(void);
-  void lock_method(void);
-  void generate_stack_overflow_check(void);
-
-  void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
-  void generate_counter_overflow(Label* do_continue);
-#endif
-
   void generate_more_monitors();
   void generate_deopt_handling();
   address generate_interpreter_frame_manager(bool synchronized); // C++ interpreter only
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,9 +66,6 @@
 #define __ _masm->
 #define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))
 
-Label fast_accessor_slow_entry_path;  // fast accessor methods need to be able to jmp to unsynchronized
-                                      // c++ interpreter entry point this holds that entry point label.
-
 // default registers for state and sender_sp
 // state and sender_sp are the same on 32bit because we have no choice.
 // state could be rsi on 64bit but it is an arg reg and not callee save
@@ -660,7 +657,6 @@
   // generate_method_entry) so the guard should work for them too.
   //
 
-  // monitor entry size: see picture of stack set (generate_method_entry) and frame_i486.hpp
   const int entry_size    = frame::interpreter_frame_monitor_size() * wordSize;
 
   // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@@ -794,156 +790,6 @@
   __ lock_object(monitor);
 }
 
-// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry
-
-address InterpreterGenerator::generate_accessor_entry(void) {
-
-  // rbx: Method*
-
-  // rsi/r13: senderSP must preserved for slow path, set SP to it on fast path
-
-  Label xreturn_path;
-
-  // do fastpath for resolved accessor methods
-  if (UseFastAccessorMethods) {
-
-    address entry_point = __ pc();
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-
-    __ jcc(Assembler::notEqual, slow_path);
-    // ASM/C++ Interpreter
-    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
-    // Note: We can only use this code if the getfield has been resolved
-    //       and if we don't have a null-pointer exception => check for
-    //       these conditions first and use slow path if necessary.
-    // rbx,: method
-    // rcx: receiver
-    __ movptr(rax, Address(rsp, wordSize));
-
-    // check if local 0 != NULL and read field
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, slow_path);
-
-    // read first instruction word and extract bytecode @ 1 and index @ 2
-    __ movptr(rdx, Address(rbx, Method::const_offset()));
-    __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
-    __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
-    // Shift codes right to get the index on the right.
-    // The bytecode fetched looks like <index><0xb4><0x2a>
-    __ shrl(rdx, 2*BitsPerByte);
-    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
-    __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
-
-    // rax,: local 0
-    // rbx,: method
-    // rcx: receiver - do not destroy since it is needed for slow path!
-    // rcx: scratch
-    // rdx: constant pool cache index
-    // rdi: constant pool cache
-    // rsi/r13: sender sp
-
-    // check if getfield has been resolved and read constant pool cache entry
-    // check the validity of the cache entry by testing whether _indices field
-    // contains Bytecode::_getfield in b1 byte.
-    assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
-    __ movl(rcx,
-            Address(rdi,
-                    rdx,
-                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
-    __ shrl(rcx, 2*BitsPerByte);
-    __ andl(rcx, 0xFF);
-    __ cmpl(rcx, Bytecodes::_getfield);
-    __ jcc(Assembler::notEqual, slow_path);
-
-    // Note: constant pool entry is not valid before bytecode is resolved
-    __ movptr(rcx,
-            Address(rdi,
-                    rdx,
-                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
-    __ movl(rdx,
-            Address(rdi,
-                    rdx,
-                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-
-    Label notByte, notShort, notChar;
-    const Address field_address (rax, rcx, Address::times_1);
-
-    // Need to differentiate between igetfield, agetfield, bgetfield etc.
-    // because they are different sizes.
-    // Use the type from the constant pool cache
-    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
-    // Make sure we don't need to mask rdx after the above shift
-    ConstantPoolCacheEntry::verify_tos_state_shift();
-#ifdef _LP64
-    Label notObj;
-    __ cmpl(rdx, atos);
-    __ jcc(Assembler::notEqual, notObj);
-    // atos
-    __ movptr(rax, field_address);
-    __ jmp(xreturn_path);
-
-    __ bind(notObj);
-#endif // _LP64
-    __ cmpl(rdx, btos);
-    __ jcc(Assembler::notEqual, notByte);
-    __ load_signed_byte(rax, field_address);
-    __ jmp(xreturn_path);
-
-    __ bind(notByte);
-    __ cmpl(rdx, stos);
-    __ jcc(Assembler::notEqual, notShort);
-    __ load_signed_short(rax, field_address);
-    __ jmp(xreturn_path);
-
-    __ bind(notShort);
-    __ cmpl(rdx, ctos);
-    __ jcc(Assembler::notEqual, notChar);
-    __ load_unsigned_short(rax, field_address);
-    __ jmp(xreturn_path);
-
-    __ bind(notChar);
-#ifdef ASSERT
-    Label okay;
-#ifndef _LP64
-    __ cmpl(rdx, atos);
-    __ jcc(Assembler::equal, okay);
-#endif // _LP64
-    __ cmpl(rdx, itos);
-    __ jcc(Assembler::equal, okay);
-    __ stop("what type is this?");
-    __ bind(okay);
-#endif // ASSERT
-    // All the rest are a 32 bit wordsize
-    __ movl(rax, field_address);
-
-    __ bind(xreturn_path);
-
-    // _ireturn/_areturn
-    __ pop(rdi);                               // get return address
-    __ mov(rsp, sender_sp_on_entry);           // set sp to sender sp
-    __ jmp(rdi);
-
-    // generate a vanilla interpreter entry as the slow path
-    __ bind(slow_path);
-    // We will enter c++ interpreter looking like it was
-    // called by the call_stub this will cause it to return
-    // a tosca result to the invoker which might have been
-    // the c++ interpreter itself.
-
-    __ jmp(fast_accessor_slow_entry_path);
-    return entry_point;
-
-  } else {
-    return NULL;
-  }
-
-}
-
 address InterpreterGenerator::generate_Reference_get_entry(void) {
 #if INCLUDE_ALL_GCS
   if (UseG1GC) {
@@ -961,7 +807,7 @@
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_accessor_entry();
+  return generate_jump_to_normal_entry();
 }
 
 //
@@ -1670,10 +1516,6 @@
 
   address entry_point = __ pc();
 
-  // Fast accessor methods share this entry point.
-  // This works because frame manager is in the same codelet
-  if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);
-
   Label dispatch_entry_2;
   __ movptr(rcx, sender_sp_on_entry);
   __ movptr(state, (int32_t)NULL_WORD);                              // no current activation
@@ -2212,40 +2054,6 @@
   return entry_point;
 }
 
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
-  // determine code generation flags
-  bool synchronized = false;
-  address entry_point = NULL;
-
-  switch (kind) {
-    case Interpreter::zerolocals             :                                                                             break;
-    case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
-    case Interpreter::native                 : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false);  break;
-    case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true);   break;
-    case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();        break;
-    case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();     break;
-    case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();     break;
-
-    case Interpreter::java_lang_math_sin     : // fall thru
-    case Interpreter::java_lang_math_cos     : // fall thru
-    case Interpreter::java_lang_math_tan     : // fall thru
-    case Interpreter::java_lang_math_abs     : // fall thru
-    case Interpreter::java_lang_math_log     : // fall thru
-    case Interpreter::java_lang_math_log10   : // fall thru
-    case Interpreter::java_lang_math_sqrt    : // fall thru
-    case Interpreter::java_lang_math_pow     : // fall thru
-    case Interpreter::java_lang_math_exp     : // fall thru
-      entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);     break;
-    case Interpreter::java_lang_ref_reference_get
-                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-    default                                  : ShouldNotReachHere();                                                       break;
-  }
-
-  if (entry_point) return entry_point;
-
-  return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
-
-}
 
 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : CppInterpreterGenerator(code) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
+
+#define __ _masm->
+
+// Generate a trampoline through which the accessor and empty entries jump to the normal entry.
+// The old "fast" entries never updated the method's invocation count, which could keep these
+// methods from being compiled and inlined even though they are prime inlining candidates.
+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+  address entry_point = __ pc();
+
+  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
+  __ jump(RuntimeAddress(Interpreter::entry_for_kind(Interpreter::zerolocals)));
+  return entry_point;
+}
+
+// Abstract method entry
+// Attempt to execute abstract method. Throw exception
+address InterpreterGenerator::generate_abstract_entry(void) {
+
+  address entry_point = __ pc();
+
+  // abstract method entry
+
+#ifndef CC_INTERP
+  //  pop return address, reset last_sp to NULL
+  __ empty_expression_stack();
+  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
+  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
+#endif
+
+  // throw exception
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+  // the call_VM checks for exception, so we should never return here.
+  __ should_not_reach_here();
+
+  return entry_point;
+}
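(On x86 the trampoline needs no scratch register: jump(RuntimeAddress(...)) emits a direct jmp carrying a runtime-call relocation, so each trivial-method kind keeps a distinct entry address at the cost of a single jump into the zerolocals path. A hedged reading of the assumed emitted form, when the target is rel32-reachable:

    jmp  <zerolocals entry>        ; E9 rel32, runtime_call relocation
)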
--- a/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,9 @@
   address generate_native_entry(bool synchronized);
   address generate_abstract_entry(void);
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_empty_entry(void);
-  address generate_accessor_entry(void);
+  address generate_jump_to_normal_entry(void);
+  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
   address generate_Reference_get_entry();
   address generate_CRC32_update_entry();
   address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -67,45 +67,6 @@
 }
 
 
-//
-// Various method entries (that c++ and asm interpreter agree upon)
-//------------------------------------------------------------------------------------------------------------------------
-//
-//
-
-// Empty method, generate a very fast return.
-
-address InterpreterGenerator::generate_empty_entry(void) {
-
-  // rbx,: Method*
-  // rcx: receiver (unused)
-  // rsi: previous interpreter state (C++ interpreter) must preserve
-  // rsi: sender sp must set sp to this value on return
-
-  if (!UseFastEmptyMethods) return NULL;
-
-  address entry_point = __ pc();
-
-  // If we need a safepoint check, generate full interpreter entry.
-  Label slow_path;
-  ExternalAddress state(SafepointSynchronize::address_of_state());
-  __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-           SafepointSynchronize::_not_synchronized);
-  __ jcc(Assembler::notEqual, slow_path);
-
-  // do nothing for empty methods (do not even increment invocation counter)
-  // Code: _return
-  // _return
-  // return w/o popping parameters
-  __ pop(rax);
-  __ mov(rsp, rsi);
-  __ jmp(rax);
-
-  __ bind(slow_path);
-  (void) generate_normal_entry(false);
-  return entry_point;
-}
-
 address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
 
   // rbx,: Method*
@@ -216,36 +177,6 @@
 }
 
 
-// Abstract method entry
-// Attempt to execute abstract method. Throw exception
-address InterpreterGenerator::generate_abstract_entry(void) {
-
-  // rbx,: Method*
-  // rcx: receiver (unused)
-  // rsi: previous interpreter state (C++ interpreter) must preserve
-
-  // rsi: sender SP
-
-  address entry_point = __ pc();
-
-  // abstract method entry
-
-#ifndef CC_INTERP
-  //  pop return address, reset last_sp to NULL
-  __ empty_expression_stack();
-  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
-  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
-#endif
-
-  // throw exception
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
-  // the call_VM checks for exception, so we should never return here.
-  __ should_not_reach_here();
-
-  return entry_point;
-}
-
-
 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
 
   // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -301,66 +301,6 @@
   return entry_point;
 }
 
-
-// Abstract method entry
-// Attempt to execute abstract method. Throw exception
-address InterpreterGenerator::generate_abstract_entry(void) {
-  // rbx: Method*
-  // r13: sender SP
-
-  address entry_point = __ pc();
-
-  // abstract method entry
-
-#ifndef CC_INTERP
-  //  pop return address, reset last_sp to NULL
-  __ empty_expression_stack();
-  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
-  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
-#endif
-
-  // throw exception
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
-                             InterpreterRuntime::throw_AbstractMethodError));
-  // the call_VM checks for exception, so we should never return here.
-  __ should_not_reach_here();
-
-  return entry_point;
-}
-
-
-// Empty method, generate a very fast return.
-
-address InterpreterGenerator::generate_empty_entry(void) {
-  // rbx: Method*
-  // r13: sender sp must set sp to this value on return
-
-  if (!UseFastEmptyMethods) {
-    return NULL;
-  }
-
-  address entry_point = __ pc();
-
-  // If we need a safepoint check, generate full interpreter entry.
-  Label slow_path;
-  __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-           SafepointSynchronize::_not_synchronized);
-  __ jcc(Assembler::notEqual, slow_path);
-
-  // do nothing for empty methods (do not even increment invocation counter)
-  // Code: _return
-  // _return
-  // return w/o popping parameters
-  __ pop(rax);
-  __ mov(rsp, r13);
-  __ jmp(rax);
-
-  __ bind(slow_path);
-  (void) generate_normal_entry(false);
-  return entry_point;
-
-}
-
 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
 
   // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -7316,17 +7316,34 @@
  * Fold 128-bit data chunk
  */
 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
-  vpclmulhdq(xtmp, xK, xcrc); // [123:64]
-  vpclmulldq(xcrc, xK, xcrc); // [63:0]
-  vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
-  pxor(xcrc, xtmp);
+  if (UseAVX > 0) {
+    vpclmulhdq(xtmp, xK, xcrc); // [123:64]
+    vpclmulldq(xcrc, xK, xcrc); // [63:0]
+    vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
+    pxor(xcrc, xtmp);
+  } else {
+    movdqa(xtmp, xcrc);
+    pclmulhdq(xtmp, xK);   // [123:64]
+    pclmulldq(xcrc, xK);   // [63:0]
+    pxor(xcrc, xtmp);
+    movdqu(xtmp, Address(buf, offset));
+    pxor(xcrc, xtmp);
+  }
 }
 
 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
-  vpclmulhdq(xtmp, xK, xcrc);
-  vpclmulldq(xcrc, xK, xcrc);
-  pxor(xcrc, xbuf);
-  pxor(xcrc, xtmp);
+  if (UseAVX > 0) {
+    vpclmulhdq(xtmp, xK, xcrc);
+    vpclmulldq(xcrc, xK, xcrc);
+    pxor(xcrc, xbuf);
+    pxor(xcrc, xtmp);
+  } else {
+    movdqa(xtmp, xcrc);
+    pclmulhdq(xtmp, xK);
+    pclmulldq(xcrc, xK);
+    pxor(xcrc, xbuf);
+    pxor(xcrc, xtmp);
+  }
 }
 
 /**
@@ -7444,9 +7461,17 @@
   // Fold 128 bits in xmm1 down into 32 bits in crc register.
   BIND(L_fold_128b);
   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
-  vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
-  vpand(xmm3, xmm0, xmm2, false /* vector256 */);
-  vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
+  if (UseAVX > 0) {
+    vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
+    vpand(xmm3, xmm0, xmm2, false /* vector256 */);
+    vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
+  } else {
+    movdqa(xmm2, xmm0);
+    pclmulqdq(xmm2, xmm1, 0x1);
+    movdqa(xmm3, xmm0);
+    pand(xmm3, xmm2);
+    pclmulqdq(xmm0, xmm3, 0x1);
+  }
   psrldq(xmm1, 8);
   psrldq(xmm2, 4);
   pxor(xmm0, xmm1);
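(Two things are worth spelling out about the new non-AVX paths; the reference model below is illustrative C++ under stated assumptions, not HotSpot code. First, the movdqa/movdqu copies exist because legacy pclmulqdq is destructive — its destination is also a source — whereas the VEX-encoded vpclmulqdq is three-operand, so the AVX path needs no copy. Second, each fold step computes x' = clmul(x_lo, K_lo) ^ clmul(x_hi, K_hi) ^ buf on the 128-bit running value x:

    #include <stdint.h>

    // Carry-less multiply of two 64-bit values: XOR-accumulate shifted copies
    // of a for every set bit of b (multiplication in GF(2)[x], no carries).
    static void clmul64(uint64_t a, uint64_t b, uint64_t* lo, uint64_t* hi) {
      uint64_t l = 0, h = 0;
      for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
          l ^= a << i;
          if (i != 0) h ^= a >> (64 - i);   // product bits above bit 63
        }
      }
      *lo = l; *hi = h;
    }

    // One 128-bit fold step, mirroring fold_128bit_crc32 above:
    // pclmulldq pairs the low halves, pclmulhdq the high halves, and the
    // results are XORed together with the next 16 bytes of input.
    static void fold128(uint64_t x[2], const uint64_t k[2], const uint64_t buf[2]) {
      uint64_t a_lo, a_hi, b_lo, b_hi;
      clmul64(x[0], k[0], &a_lo, &a_hi);   // low  x low
      clmul64(x[1], k[1], &b_lo, &b_hi);   // high x high
      x[0] = a_lo ^ b_lo ^ buf[0];
      x[1] = a_hi ^ b_hi ^ buf[1];
    }
)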
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -966,6 +966,16 @@
   void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
   void mulss(XMMRegister dst, AddressLiteral src);
 
+  // Carry-Less Multiplication Quadword
+  void pclmulldq(XMMRegister dst, XMMRegister src) {
+    // 0x00 - multiply lower 64 bits [0:63]
+    Assembler::pclmulqdq(dst, src, 0x00);
+  }
+  void pclmulhdq(XMMRegister dst, XMMRegister src) {
+    // 0x11 - multiply upper 64 bits [64:127]
+    Assembler::pclmulqdq(dst, src, 0x11);
+  }
+
   void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
   void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
   void sqrtsd(XMMRegister dst, AddressLiteral src);
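(The imm8 in Assembler::pclmulqdq selects which quadword of each operand enters the multiply: bit 0 picks the half of the first (destination) operand, bit 4 the half of the second, so 0x00 and 0x11 are the low-low and high-high products the comments above describe. A self-contained check using the compiler intrinsic — assumes a CLMUL-capable CPU and e.g. -mpclmul; not HotSpot code:

    #include <wmmintrin.h>   // _mm_clmulepi64_si128
    #include <stdint.h>
    #include <stdio.h>

    int main() {
      __m128i a = _mm_set_epi64x(0, 0x3);   // low quadword = 0b011
      __m128i b = _mm_set_epi64x(0, 0x5);   // low quadword = 0b101
      // imm8 0x00: low half of a times low half of b, carry-less.
      __m128i r = _mm_clmulepi64_si128(a, b, 0x00);
      // 0b011 clmul 0b101 = (0b011<<0) ^ (0b011<<2) = 0b1111 -> prints "f".
      printf("%llx\n", (unsigned long long)_mm_cvtsi128_si64(r));
      return 0;
    }
)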
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -38,7 +38,7 @@
                                          int callee_locals,
                                          bool is_top_frame) {
   // Note: This calculation must exactly parallel the frame setup
-  // in AbstractInterpreterGenerator::generate_method_entry.
+  // in InterpreterGenerator::generate_fixed_frame.
 
   // fixed size of an interpreter frame:
   int overhead = frame::sender_sp_offset -
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -468,10 +468,10 @@
   // rax,
 
   // NOTE:  since the additional locals are also always pushed (wasn't obvious in
-  // generate_method_entry) so the guard should work for them too.
+  // generate_fixed_frame) so the guard should work for them too.
   //
 
-  // monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
+  // monitor entry size: see picture of stack in frame_x86.hpp
   const int entry_size    = frame::interpreter_frame_monitor_size() * wordSize;
 
   // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@@ -633,145 +633,6 @@
   __ movptr(Address(rsp, 0), rsp);                    // set expression stack bottom
 }
 
-// End of helpers
-
-//
-// Various method entries
-//------------------------------------------------------------------------------------------------------------------------
-//
-//
-
-// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry
-
-address InterpreterGenerator::generate_accessor_entry(void) {
-
-  // rbx,: Method*
-  // rcx: receiver (preserve for slow entry into asm interpreter)
-
-  // rsi: senderSP must preserved for slow path, set SP to it on fast path
-
-  address entry_point = __ pc();
-  Label xreturn_path;
-
-  // do fastpath for resolved accessor methods
-  if (UseFastAccessorMethods) {
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-
-    __ jcc(Assembler::notEqual, slow_path);
-    // ASM/C++ Interpreter
-    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
-    // Note: We can only use this code if the getfield has been resolved
-    //       and if we don't have a null-pointer exception => check for
-    //       these conditions first and use slow path if necessary.
-    // rbx,: method
-    // rcx: receiver
-    __ movptr(rax, Address(rsp, wordSize));
-
-    // check if local 0 != NULL and read field
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, slow_path);
-
-    // read first instruction word and extract bytecode @ 1 and index @ 2
-    __ movptr(rdx, Address(rbx, Method::const_offset()));
-    __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
-    __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
-    // Shift codes right to get the index on the right.
-    // The bytecode fetched looks like <index><0xb4><0x2a>
-    __ shrl(rdx, 2*BitsPerByte);
-    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
-    __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
-
-    // rax,: local 0
-    // rbx,: method
-    // rcx: receiver - do not destroy since it is needed for slow path!
-    // rcx: scratch
-    // rdx: constant pool cache index
-    // rdi: constant pool cache
-    // rsi: sender sp
-
-    // check if getfield has been resolved and read constant pool cache entry
-    // check the validity of the cache entry by testing whether _indices field
-    // contains Bytecode::_getfield in b1 byte.
-    assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
-    __ movl(rcx,
-            Address(rdi,
-                    rdx,
-                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
-    __ shrl(rcx, 2*BitsPerByte);
-    __ andl(rcx, 0xFF);
-    __ cmpl(rcx, Bytecodes::_getfield);
-    __ jcc(Assembler::notEqual, slow_path);
-
-    // Note: constant pool entry is not valid before bytecode is resolved
-    __ movptr(rcx,
-              Address(rdi,
-                      rdx,
-                      Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
-    __ movl(rdx,
-            Address(rdi,
-                    rdx,
-                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-
-    Label notByte, notShort, notChar;
-    const Address field_address (rax, rcx, Address::times_1);
-
-    // Need to differentiate between igetfield, agetfield, bgetfield etc.
-    // because they are different sizes.
-    // Use the type from the constant pool cache
-    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
-    // Make sure we don't need to mask rdx after the above shift
-    ConstantPoolCacheEntry::verify_tos_state_shift();
-    __ cmpl(rdx, btos);
-    __ jcc(Assembler::notEqual, notByte);
-    __ load_signed_byte(rax, field_address);
-    __ jmp(xreturn_path);
-
-    __ bind(notByte);
-    __ cmpl(rdx, stos);
-    __ jcc(Assembler::notEqual, notShort);
-    __ load_signed_short(rax, field_address);
-    __ jmp(xreturn_path);
-
-    __ bind(notShort);
-    __ cmpl(rdx, ctos);
-    __ jcc(Assembler::notEqual, notChar);
-    __ load_unsigned_short(rax, field_address);
-    __ jmp(xreturn_path);
-
-    __ bind(notChar);
-#ifdef ASSERT
-    Label okay;
-    __ cmpl(rdx, atos);
-    __ jcc(Assembler::equal, okay);
-    __ cmpl(rdx, itos);
-    __ jcc(Assembler::equal, okay);
-    __ stop("what type is this?");
-    __ bind(okay);
-#endif // ASSERT
-    // All the rest are a 32 bit wordsize
-    // This is ok for now. Since fast accessors should be going away
-    __ movptr(rax, field_address);
-
-    __ bind(xreturn_path);
-
-    // _ireturn/_areturn
-    __ pop(rdi);                               // get return address
-    __ mov(rsp, rsi);                          // set sp to sender sp
-    __ jmp(rdi);
-
-    // generate a vanilla interpreter entry as the slow path
-    __ bind(slow_path);
-
-    (void) generate_normal_entry(false);
-    return entry_point;
-  }
-  return NULL;
-
-}
 
 // Method entry for java.lang.ref.Reference.get.
 address InterpreterGenerator::generate_Reference_get_entry(void) {
@@ -862,7 +723,7 @@
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_accessor_entry();
+  return generate_jump_to_normal_entry();
 }
 
 /**
@@ -1557,100 +1418,6 @@
   return entry_point;
 }
 
-//------------------------------------------------------------------------------------------------------------------------
-// Entry points
-//
-// Here we generate the various kinds of entries into the interpreter.
-// The two main entry types are generic bytecode methods and native call
-// methods. These both come in synchronized and non-synchronized versions
-// but the frame layout they create is very similar. The other method
-// entry types are special-purpose entries that combine entry and
-// interpretation in one, for trivial methods like accessor, empty, or
-// special math methods.
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// Arguments:
-//
-// rbx,: Method*
-// rcx: receiver
-//
-//
-// Stack layout immediately at entry
-//
-// [ return address     ] <--- rsp
-// [ parameter n        ]
-//   ...
-// [ parameter 1        ]
-// [ expression stack   ] (caller's java expression stack)
-
-// Assuming that we don't go to one of the trivial specialized
-// entries the stack will look like below when we are ready to execute
-// the first bytecode (or call the native routine). The register usage
-// will be as the template based interpreter expects (see interpreter_x86.hpp).
-//
-// local variables follow incoming parameters immediately; i.e.,
-// the return address is moved to the end of the locals.
-//
-// [ monitor entry      ] <--- rsp
-//   ...
-// [ monitor entry      ]
-// [ expr. stack bottom ]
-// [ saved rsi          ]
-// [ current rdi        ]
-// [ Method*            ]
-// [ saved rbp,          ] <--- rbp,
-// [ return address     ]
-// [ local variable m   ]
-//   ...
-// [ local variable 1   ]
-// [ parameter n        ]
-//   ...
-// [ parameter 1        ] <--- rdi
-
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
-  // determine code generation flags
-  bool synchronized = false;
-  address entry_point = NULL;
-  InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
-
-  switch (kind) {
-    case Interpreter::zerolocals             :                                                       break;
-    case Interpreter::zerolocals_synchronized: synchronized = true;                                  break;
-    case Interpreter::native                 : entry_point = ig_this->generate_native_entry(false);  break;
-    case Interpreter::native_synchronized    : entry_point = ig_this->generate_native_entry(true);   break;
-    case Interpreter::empty                  : entry_point = ig_this->generate_empty_entry();        break;
-    case Interpreter::accessor               : entry_point = ig_this->generate_accessor_entry();     break;
-    case Interpreter::abstract               : entry_point = ig_this->generate_abstract_entry();     break;
-
-    case Interpreter::java_lang_math_sin     : // fall thru
-    case Interpreter::java_lang_math_cos     : // fall thru
-    case Interpreter::java_lang_math_tan     : // fall thru
-    case Interpreter::java_lang_math_abs     : // fall thru
-    case Interpreter::java_lang_math_log     : // fall thru
-    case Interpreter::java_lang_math_log10   : // fall thru
-    case Interpreter::java_lang_math_sqrt    : // fall thru
-    case Interpreter::java_lang_math_pow     : // fall thru
-    case Interpreter::java_lang_math_exp     : entry_point = ig_this->generate_math_entry(kind);      break;
-    case Interpreter::java_lang_ref_reference_get
-                                             : entry_point = ig_this->generate_Reference_get_entry(); break;
-    case Interpreter::java_util_zip_CRC32_update
-                                             : entry_point = ig_this->generate_CRC32_update_entry();  break;
-    case Interpreter::java_util_zip_CRC32_updateBytes
-                                             : // fall thru
-    case Interpreter::java_util_zip_CRC32_updateByteBuffer
-                                             : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
-    default:
-      fatal(err_msg("unexpected method kind: %d", kind));
-      break;
-  }
-
-  if (entry_point) return entry_point;
-
-  return ig_this->generate_normal_entry(synchronized);
-
-}
 
 // These should never be compiled since the interpreter will prefer
 // the compiled version to the intrinsic version.
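
The fast-accessor path removed above relies on the fixed code shape of a
resolved accessor method: its bytecodes are _aload_0 (0x2a), _getfield (0xb4)
followed by a two-byte constant pool cache index, then a return. The stub
fetches the first four code bytes as one little-endian word and shifts twice:
once to recover the cache index, and once to pull the b1 bytecode out of the
cache entry's _indices word. A minimal sketch of what those shifts compute
(plain C++, helper names hypothetical):

    #include <stdint.h>

    // First instruction word of an accessor method, loaded little-endian:
    // byte 0 = 0x2a (_aload_0), byte 1 = 0xb4 (_getfield),
    // bytes 2-3 = cp cache index; the register holds <index><0xb4><0x2a>.
    static int cp_cache_index(uint32_t first_code_word) {
      return (int)(first_code_word >> (2 * 8));   // shrl(rdx, 2*BitsPerByte)
    }

    // b1 bytecode cached in a ConstantPoolCacheEntry's _indices field;
    // Bytecodes::_getfield (0xb4) there means the entry is resolved.
    static int indices_b1_bytecode(uint32_t indices) {
      return (int)((indices >> (2 * 8)) & 0xFF);  // shrl then andl(0xFF)
    }
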
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -400,7 +400,7 @@
 // page mechanism will work for that.
 //
 // NOTE: The additional locals are also always pushed (this wasn't
-// obvious in generate_method_entry), so the guard should work for them
+// obvious in generate_fixed_frame), so the guard should work for them
 // too.
 //
 // Args:
@@ -411,8 +411,7 @@
 //      rax
 void InterpreterGenerator::generate_stack_overflow_check(void) {
 
-  // monitor entry size: see picture of stack set
-  // (generate_method_entry) and frame_amd64.hpp
+  // monitor entry size: see picture of stack in frame_x86.hpp
   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
 
   // total overhead size: entry_size + (saved rbp through expr stack
@@ -600,153 +599,6 @@
 
 // End of helpers
 
-// Various method entries
-//------------------------------------------------------------------------------------------------------------------------
-//
-//
-
-// Call an accessor method (assuming it is resolved, otherwise drop
-// into vanilla (slow path) entry
-address InterpreterGenerator::generate_accessor_entry(void) {
-  // rbx: Method*
-
-  // r13: senderSP must be preserved for slow path; set SP to it on fast path
-
-  address entry_point = __ pc();
-  Label xreturn_path;
-
-  // do fastpath for resolved accessor methods
-  if (UseFastAccessorMethods) {
-    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
-    //       thereof; parameter size = 1
-    // Note: We can only use this code if the getfield has been resolved
-    //       and if we don't have a null-pointer exception => check for
-    //       these conditions first and use slow path if necessary.
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-
-    __ jcc(Assembler::notEqual, slow_path);
-    // rbx: method
-    __ movptr(rax, Address(rsp, wordSize));
-
-    // check if local 0 != NULL and read field
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, slow_path);
-
-    // read first instruction word and extract bytecode @ 1 and index @ 2
-    __ movptr(rdx, Address(rbx, Method::const_offset()));
-    __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
-    __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
-    // Shift codes right to get the index on the right.
-    // The bytecode fetched looks like <index><0xb4><0x2a>
-    __ shrl(rdx, 2 * BitsPerByte);
-    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
-    __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
-
-    // rax: local 0
-    // rbx: method
-    // rdx: constant pool cache index
-    // rdi: constant pool cache
-
-    // check if getfield has been resolved and read constant pool cache entry;
-    // check the validity of the cache entry by testing whether the _indices field
-    // contains Bytecodes::_getfield in its b1 byte.
-    assert(in_words(ConstantPoolCacheEntry::size()) == 4,
-           "adjust shift below");
-    __ movl(rcx,
-            Address(rdi,
-                    rdx,
-                    Address::times_8,
-                    ConstantPoolCache::base_offset() +
-                    ConstantPoolCacheEntry::indices_offset()));
-    __ shrl(rcx, 2 * BitsPerByte);
-    __ andl(rcx, 0xFF);
-    __ cmpl(rcx, Bytecodes::_getfield);
-    __ jcc(Assembler::notEqual, slow_path);
-
-    // Note: constant pool entry is not valid before bytecode is resolved
-    __ movptr(rcx,
-              Address(rdi,
-                      rdx,
-                      Address::times_8,
-                      ConstantPoolCache::base_offset() +
-                      ConstantPoolCacheEntry::f2_offset()));
-    // edx: flags
-    __ movl(rdx,
-            Address(rdi,
-                    rdx,
-                    Address::times_8,
-                    ConstantPoolCache::base_offset() +
-                    ConstantPoolCacheEntry::flags_offset()));
-
-    Label notObj, notInt, notByte, notShort;
-    const Address field_address(rax, rcx, Address::times_1);
-
-    // Need to differentiate between igetfield, agetfield, bgetfield etc.
-    // because they are different sizes.
-    // Use the type from the constant pool cache
-    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
-    // Make sure we don't need to mask edx after the above shift
-    ConstantPoolCacheEntry::verify_tos_state_shift();
-
-    __ cmpl(rdx, atos);
-    __ jcc(Assembler::notEqual, notObj);
-    // atos
-    __ load_heap_oop(rax, field_address);
-    __ jmp(xreturn_path);
-
-    __ bind(notObj);
-    __ cmpl(rdx, itos);
-    __ jcc(Assembler::notEqual, notInt);
-    // itos
-    __ movl(rax, field_address);
-    __ jmp(xreturn_path);
-
-    __ bind(notInt);
-    __ cmpl(rdx, btos);
-    __ jcc(Assembler::notEqual, notByte);
-    // btos
-    __ load_signed_byte(rax, field_address);
-    __ jmp(xreturn_path);
-
-    __ bind(notByte);
-    __ cmpl(rdx, stos);
-    __ jcc(Assembler::notEqual, notShort);
-    // stos
-    __ load_signed_short(rax, field_address);
-    __ jmp(xreturn_path);
-
-    __ bind(notShort);
-#ifdef ASSERT
-    Label okay;
-    __ cmpl(rdx, ctos);
-    __ jcc(Assembler::equal, okay);
-    __ stop("what type is this?");
-    __ bind(okay);
-#endif
-    // ctos
-    __ load_unsigned_short(rax, field_address);
-
-    __ bind(xreturn_path);
-
-    // _ireturn/_areturn
-    __ pop(rdi);
-    __ mov(rsp, r13);
-    __ jmp(rdi);
-    __ ret(0);
-
-    // generate a vanilla interpreter entry as the slow path
-    __ bind(slow_path);
-    (void) generate_normal_entry(false);
-  } else {
-    (void) generate_normal_entry(false);
-  }
-
-  return entry_point;
-}
-
 // Method entry for java.lang.ref.Reference.get.
 address InterpreterGenerator::generate_Reference_get_entry(void) {
 #if INCLUDE_ALL_GCS
@@ -773,8 +625,6 @@
   //   and so we don't need to call the G1 pre-barrier. Thus we can use the
   //   regular method entry code to generate the NPE.
   //
-  // This code is based on generate_accessor_entry.
-  //
   // rbx: Method*
 
  // r13: senderSP must be preserved for slow path; set SP to it on fast path
@@ -832,7 +682,7 @@
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_accessor_entry();
+  return generate_jump_to_normal_entry();
 }
 
 /**
@@ -1566,100 +1416,6 @@
   return entry_point;
 }
 
-// Entry points
-//
-// Here we generate the various kinds of entries into the interpreter.
-// The two main entry types are generic bytecode methods and native
-// call methods. These both come in synchronized and non-synchronized
-// versions but the frame layout they create is very similar. The
-// other method entry types are special-purpose entries that combine
-// entry and interpretation in one. These are for trivial methods like
-// accessor, empty, or special math methods.
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// Arguments:
-//
-// rbx: Method*
-//
-// Stack layout immediately at entry
-//
-// [ return address     ] <--- rsp
-// [ parameter n        ]
-//   ...
-// [ parameter 1        ]
-// [ expression stack   ] (caller's java expression stack)
-
-// Assuming that we don't go to one of the trivial specialized entries
-// the stack will look like below when we are ready to execute the
-// first bytecode (or call the native routine). The register usage
-// will be as the template based interpreter expects (see
-// interpreter_amd64.hpp).
-//
-// local variables follow incoming parameters immediately; i.e.,
-// the return address is moved to the end of the locals.
-//
-// [ monitor entry      ] <--- rsp
-//   ...
-// [ monitor entry      ]
-// [ expr. stack bottom ]
-// [ saved r13          ]
-// [ current r14        ]
-// [ Method*            ]
-// [ saved ebp          ] <--- rbp
-// [ return address     ]
-// [ local variable m   ]
-//   ...
-// [ local variable 1   ]
-// [ parameter n        ]
-//   ...
-// [ parameter 1        ] <--- r14
-
-address AbstractInterpreterGenerator::generate_method_entry(
-                                        AbstractInterpreter::MethodKind kind) {
-  // determine code generation flags
-  bool synchronized = false;
-  address entry_point = NULL;
-  InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
-
-  switch (kind) {
-  case Interpreter::zerolocals             :                                                      break;
-  case Interpreter::zerolocals_synchronized: synchronized = true;                                 break;
-  case Interpreter::native                 : entry_point = ig_this->generate_native_entry(false); break;
-  case Interpreter::native_synchronized    : entry_point = ig_this->generate_native_entry(true);  break;
-  case Interpreter::empty                  : entry_point = ig_this->generate_empty_entry();       break;
-  case Interpreter::accessor               : entry_point = ig_this->generate_accessor_entry();    break;
-  case Interpreter::abstract               : entry_point = ig_this->generate_abstract_entry();    break;
-
-  case Interpreter::java_lang_math_sin     : // fall thru
-  case Interpreter::java_lang_math_cos     : // fall thru
-  case Interpreter::java_lang_math_tan     : // fall thru
-  case Interpreter::java_lang_math_abs     : // fall thru
-  case Interpreter::java_lang_math_log     : // fall thru
-  case Interpreter::java_lang_math_log10   : // fall thru
-  case Interpreter::java_lang_math_sqrt    : // fall thru
-  case Interpreter::java_lang_math_pow     : // fall thru
-  case Interpreter::java_lang_math_exp     : entry_point = ig_this->generate_math_entry(kind);      break;
-  case Interpreter::java_lang_ref_reference_get
-                                           : entry_point = ig_this->generate_Reference_get_entry(); break;
-  case Interpreter::java_util_zip_CRC32_update
-                                           : entry_point = ig_this->generate_CRC32_update_entry();  break;
-  case Interpreter::java_util_zip_CRC32_updateBytes
-                                           : // fall thru
-  case Interpreter::java_util_zip_CRC32_updateByteBuffer
-                                           : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
-  default:
-    fatal(err_msg("unexpected method kind: %d", kind));
-    break;
-  }
-
-  if (entry_point) {
-    return entry_point;
-  }
-
-  return ig_this->generate_normal_entry(synchronized);
-}
 
 // These should never be compiled since the interpreter will prefer
 // the compiled version to the intrinsic version.
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/java.hpp"
+#include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "vm_version_x86.hpp"
 
@@ -514,7 +515,7 @@
                (supports_tscinv() ? ", tscinv": ""),
                (supports_bmi1() ? ", bmi1" : ""),
                (supports_bmi2() ? ", bmi2" : ""));
-  _features_str = strdup(buf);
+  _features_str = os::strdup(buf);
 
   // UseSSE is set to the smaller of what hardware supports and what
   // the command line requires.  I.e., you cannot set UseSSE to 2 on
@@ -559,7 +560,7 @@
     FLAG_SET_DEFAULT(UseCLMUL, false);
   }
 
-  if (UseCLMUL && (UseAVX > 0) && (UseSSE > 2)) {
+  if (UseCLMUL && (UseSSE > 2)) {
     if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
       UseCRC32Intrinsics = true;
     }
@@ -805,6 +806,21 @@
         }
       }
     }
+    if ((cpu_family() == 0x06) &&
+        ((extended_cpu_model() == 0x36) || // Centerton
+         (extended_cpu_model() == 0x37) || // Silvermont
+         (extended_cpu_model() == 0x4D))) {
+#ifdef COMPILER2
+      if (FLAG_IS_DEFAULT(OptoScheduling)) {
+        OptoScheduling = true;
+      }
+#endif
+      if (supports_sse4_2()) { // Silvermont
+        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
+          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
+        }
+      }
+    }
   }
 
   // Use count leading zeros count instruction if available.
@@ -892,23 +908,25 @@
   AllocatePrefetchDistance = allocate_prefetch_distance();
   AllocatePrefetchStyle    = allocate_prefetch_style();
 
-  if( is_intel() && cpu_family() == 6 && supports_sse3() ) {
-    if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core
+  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
+    if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
 #ifdef _LP64
       AllocatePrefetchDistance = 384;
 #else
       AllocatePrefetchDistance = 320;
 #endif
     }
-    if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
+    if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
       AllocatePrefetchDistance = 192;
       AllocatePrefetchLines = 4;
+    }
 #ifdef COMPILER2
-      if (AggressiveOpts && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
+    if (supports_sse4_2()) {
+      if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
         FLAG_SET_DEFAULT(UseFPUForSpilling, true);
       }
+    }
 #endif
-    }
   }
   assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");
 
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -831,60 +831,6 @@
   return generate_entry((address) CppInterpreter::normal_entry);
 }
 
-address AbstractInterpreterGenerator::generate_method_entry(
-    AbstractInterpreter::MethodKind kind) {
-  address entry_point = NULL;
-
-  switch (kind) {
-  case Interpreter::zerolocals:
-  case Interpreter::zerolocals_synchronized:
-    break;
-
-  case Interpreter::native:
-    entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
-    break;
-
-  case Interpreter::native_synchronized:
-    entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
-    break;
-
-  case Interpreter::empty:
-    entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();
-    break;
-
-  case Interpreter::accessor:
-    entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();
-    break;
-
-  case Interpreter::abstract:
-    entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();
-    break;
-
-  case Interpreter::java_lang_math_sin:
-  case Interpreter::java_lang_math_cos:
-  case Interpreter::java_lang_math_tan:
-  case Interpreter::java_lang_math_abs:
-  case Interpreter::java_lang_math_log:
-  case Interpreter::java_lang_math_log10:
-  case Interpreter::java_lang_math_sqrt:
-  case Interpreter::java_lang_math_pow:
-  case Interpreter::java_lang_math_exp:
-    entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
-    break;
-
-  case Interpreter::java_lang_ref_reference_get:
-    entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();
-    break;
-
-  default:
-    ShouldNotReachHere();
-  }
-
-  if (entry_point == NULL)
-    entry_point = ((InterpreterGenerator*) this)->generate_normal_entry(false);
-
-  return entry_point;
-}
 
 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : CppInterpreterGenerator(code) {
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -61,6 +61,12 @@
 
 define_pd_global(uintx, TypeProfileLevel, 0);
 
-#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)  \
+  product(bool, UseFastEmptyMethods, true,                                  \
+          "Use fast method entry code for empty methods")                   \
+                                                                            \
+  product(bool, UseFastAccessorMethods, true,                               \
+          "Use fast method entry code for accessor methods")                \
+                                                                            \
 
 #endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
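
UseFastEmptyMethods and UseFastAccessorMethods survive here as Zero-only
flags, presumably because the shared definitions go away with the entry-point
consolidation above. ARCH_FLAGS is an X-macro: it receives one callback per
flag kind and is expanded twice by the build, once to declare the variables
and once to define their defaults. A sketch of the pattern (callback names
illustrative, not necessarily HotSpot's exact ones):

    // Expansion callbacks for the 'product' slot of ARCH_FLAGS:
    #define DECLARE_PRODUCT_FLAG(type, name, value, doc)     extern type name;
    #define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc) type name = value;

    // In a header:      ARCH_FLAGS(..., DECLARE_PRODUCT_FLAG, ...)
    //   expands to:     extern bool UseFastAccessorMethods;
    // In a single .cpp: ARCH_FLAGS(..., MATERIALIZE_PRODUCT_FLAG, ...)
    //   expands to:     bool UseFastAccessorMethods = true;
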
--- a/hotspot/src/cpu/zero/vm/interpreterGenerator_zero.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/cpu/zero/vm/interpreterGenerator_zero.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -39,4 +39,7 @@
   address generate_accessor_entry();
   address generate_Reference_get_entry();
 
+  // Not supported
+  address generate_CRC32_update_entry() { return NULL; }
+  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
 #endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -58,6 +58,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "runtime/os.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -378,10 +379,10 @@
   // default should be 4K.
   size_t data_page_size = SIZE_4K;
   {
-    void* p = ::malloc(SIZE_16M);
+    void* p = os::malloc(SIZE_16M, mtInternal);
     guarantee(p != NULL, "malloc failed");
     data_page_size = os::Aix::query_pagesize(p);
-    ::free(p);
+    os::free(p);
   }
 
   // query default shm page size (LDR_CNTRL SHMPSIZE)
--- a/hotspot/src/os/aix/vm/porting_aix.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os/aix/vm/porting_aix.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -24,6 +24,8 @@
 
 #include "asm/assembler.hpp"
 #include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/os.hpp"
 #include "loadlib_aix.hpp"
 #include "porting_aix.hpp"
 #include "utilities/debug.hpp"
@@ -83,7 +85,7 @@
     while (n) {
       node* p = n;
       n = n->next;
-      free(p->v);
+      os::free(p->v);
       delete p;
     }
   }
@@ -95,7 +97,7 @@
       }
     }
     node* p = new node;
-    p->v = strdup(s);
+    p->v = os::strdup_check_oom(s);
     p->next = first;
     first = p;
     return p->v;
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -2439,23 +2439,25 @@
   }
 
   // The memory is committed
-  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
 
   return addr;
 }
 
 bool os::release_memory_special(char* base, size_t bytes) {
-  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  // detaching the SHM segment will also delete it, see reserve_memory_special()
-  int rslt = shmdt(base);
-  if (rslt == 0) {
-    tkr.record((address)base, bytes);
-    return true;
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+    // detaching the SHM segment will also delete it, see reserve_memory_special()
+    int rslt = shmdt(base);
+    if (rslt == 0) {
+      tkr.record((address)base, bytes);
+      return true;
+    } else {
+      return false;
+    }
   } else {
-    tkr.discard();
-    return false;
+    return shmdt(base) == 0;
   }
-
 }
 
 size_t os::large_page_size() {
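
The reshaped release path only constructs a Tracker when NMT is actually
recording (tracking level above NMT_minimal), and it does so before shmdt so
the release is recorded inside the tracker's scope, presumably so that a
racing reservation of the same range cannot be recorded first. The Linux and
Windows hunks below follow the same shape. A sketch of the generic pattern,
with the hypothetical do_release() standing in for the platform primitive
(shmdt, munmap, remove_file_mapping, ...):

    bool guarded_release(char* base, size_t bytes) {
      if (MemTracker::tracking_level() > NMT_minimal) {
        // take the tracker before freeing the range
        Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
        if (!do_release(base, bytes)) {
          return false;               // nothing to record on failure
        }
        tkr.record((address)base, bytes);
        return true;
      }
      return do_release(base, bytes); // NMT off/minimal: skip bookkeeping
    }
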
--- a/hotspot/src/os/bsd/vm/perfMemory_bsd.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os/bsd/vm/perfMemory_bsd.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -753,7 +753,7 @@
   (void)::memset((void*) mapAddress, 0, size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
 
   return mapAddress;
 }
@@ -918,7 +918,7 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
 
   *addr = mapAddress;
   *sizep = size;
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -3504,9 +3504,12 @@
 
   assert(is_ptr_aligned(start, alignment), "Must be");
 
-  // os::reserve_memory_special will record this memory area.
-  // Need to release it here to prevent overlapping reservations.
-  MemTracker::record_virtual_memory_release((address)start, bytes);
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    // os::reserve_memory_special will record this memory area.
+    // Need to release it here to prevent overlapping reservations.
+    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+    tkr.record((address)start, bytes);
+  }
 
   char* end = start + bytes;
 
@@ -3601,7 +3604,7 @@
     }
 
     // The memory is committed
-    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
   }
 
   return addr;
@@ -3617,24 +3620,30 @@
 }
 
 bool os::release_memory_special(char* base, size_t bytes) {
+  bool res;
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+    res = os::Linux::release_memory_special_impl(base, bytes);
+    if (res) {
+      tkr.record((address)base, bytes);
+    }
+
+  } else {
+    res = os::Linux::release_memory_special_impl(base, bytes);
+  }
+  return res;
+}
+
+bool os::Linux::release_memory_special_impl(char* base, size_t bytes) {
   assert(UseLargePages, "only for large pages");
-
-  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-
   bool res;
+
   if (UseSHM) {
     res = os::Linux::release_memory_special_shm(base, bytes);
   } else {
     assert(UseHugeTLBFS, "must be");
     res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
   }
-
-  if (res) {
-    tkr.record((address)base, bytes);
-  } else {
-    tkr.discard();
-  }
-
   return res;
 }
 
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -108,6 +108,7 @@
   static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
   static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
 
+  static bool release_memory_special_impl(char* base, size_t bytes);
   static bool release_memory_special_shm(char* base, size_t bytes);
   static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
 
--- a/hotspot/src/os/linux/vm/perfMemory_linux.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os/linux/vm/perfMemory_linux.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -753,7 +753,7 @@
   (void)::memset((void*) mapAddress, 0, size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
 
   return mapAddress;
 }
@@ -924,7 +924,7 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
 
   *addr = mapAddress;
   *sizep = size;
--- a/hotspot/src/os/posix/vm/os_posix.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os/posix/vm/os_posix.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -75,21 +75,41 @@
   VMError::report_coredump_status(buffer, success);
 }
 
-address os::get_caller_pc(int n) {
+int os::get_native_stack(address* stack, int frames, int toSkip) {
 #ifdef _NMT_NOINLINE_
-  n ++;
+  toSkip++;
 #endif
+
+  int frame_idx = 0;
+  int num_of_frames;  // number of frames captured
   frame fr = os::current_frame();
-  while (n > 0 && fr.pc() &&
-    !os::is_first_C_frame(&fr) && fr.sender_pc()) {
-    fr = os::get_sender_for_C_frame(&fr);
-    n --;
+  while (fr.pc() && frame_idx < frames) {
+    if (toSkip > 0) {
+      toSkip --;
+    } else {
+      stack[frame_idx ++] = fr.pc();
+    }
+    if (fr.fp() == NULL || os::is_first_C_frame(&fr)
+        || fr.sender_pc() == NULL || fr.cb() != NULL) break;
+
+    if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
+      fr = os::get_sender_for_C_frame(&fr);
+    } else {
+      break;
+    }
   }
-  if (n == 0) {
-    return fr.pc();
-  } else {
-    return NULL;
+  num_of_frames = frame_idx;
+  for (; frame_idx < frames; frame_idx ++) {
+    stack[frame_idx] = NULL;
   }
+
+  return num_of_frames;
+}
+
+
+bool os::unsetenv(const char* name) {
+  assert(name != NULL, "Null pointer");
+  return (::unsetenv(name) == 0);
 }
 
 int os::get_last_error() {
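
os::get_native_stack() replaces the one-frame os::get_caller_pc(): it captures
up to 'frames' return addresses, NULLs any remaining slots, and returns the
captured count. A hypothetical caller, as a sketch:

    // Capture up to four native frames, skipping this function's own frame.
    address stack[4];
    int captured = os::get_native_stack(stack, 4, /* toSkip */ 1);
    for (int i = 0; i < captured; i++) {
      // stack[i] is the pc of the i-th caller frame; slots beyond
      // 'captured' were already set to NULL by get_native_stack.
    }
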
--- a/hotspot/src/os/solaris/vm/attachListener_solaris.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os/solaris/vm/attachListener_solaris.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -199,23 +199,29 @@
 // Calls from the door function to check that the client credentials
 // match this process. Returns 0 if credentials okay, otherwise -1.
 static int check_credentials() {
-  door_cred_t cred_info;
+  ucred_t *cred_info = NULL;
+  int ret = -1; // deny by default
 
   // get client credentials
-  if (door_cred(&cred_info) == -1) {
-    return -1; // unable to get them
+  if (door_ucred(&cred_info) == -1) {
+    return -1; // unable to get them, deny
   }
 
   // get our euid/eguid (probably could cache these)
   uid_t euid = geteuid();
   gid_t egid = getegid();
 
-  // check that the effective uid/gid matches - discuss this with Jeff.
-  if (cred_info.dc_euid == euid && cred_info.dc_egid == egid) {
-    return 0;  // okay
-  } else {
-    return -1; // denied
+  // get effective uid/gid from the ucred
+  uid_t ucred_euid = ucred_geteuid(cred_info);
+  gid_t ucred_egid = ucred_getegid(cred_info);
+
+  // check that the effective uid/gid matches
+  if (ucred_euid == euid && ucred_egid == egid) {
+    ret = 0;  // allow
   }
+
+  ucred_free(cred_info);
+  return ret;
 }
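
The switch from door_cred() to door_ucred() brings an ownership obligation:
per the Solaris man pages, door_ucred() allocates a ucred_t when the caller
passes a pointer to NULL, and the caller must release it with ucred_free().
That is why the rewrite defaults ret to -1 (deny) and funnels every outcome
through a single free-then-return tail. The lifecycle, as a sketch:

    ucred_t* uc = NULL;
    if (door_ucred(&uc) == -1) {
      return -1;                         // nothing allocated on failure
    }
    uid_t peer_euid = ucred_geteuid(uc); // effective uid of the door client
    gid_t peer_egid = ucred_getegid(uc); // effective gid of the door client
    ucred_free(uc);                      // caller owns the ucred_t
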
 
 
--- a/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -770,7 +770,8 @@
   (void)::memset((void*) mapAddress, 0, size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
+    size, CURRENT_PC, mtInternal);
 
   return mapAddress;
 }
@@ -941,7 +942,8 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
+    size, CURRENT_PC, mtInternal);
 
   *addr = mapAddress;
   *sizep = size;
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -138,9 +138,8 @@
       // Workaround for issue when a custom launcher doesn't call
       // DestroyJavaVM and NMT is trying to track memory when free is
       // called from a static destructor
-      if (MemTracker::is_on()) {
-          MemTracker::shutdown(MemTracker::NMT_normal);
-      }
+      MemTracker::shutdown();
+
       break;
     default:
       break;
@@ -163,6 +162,10 @@
  return result > 0 && result < len;
 }
 
+bool os::unsetenv(const char* name) {
+  assert(name != NULL, "Null pointer");
+  return (SetEnvironmentVariable(name, NULL) == TRUE);
+}
 
 // No setuid programs under Windows.
 bool os::have_special_privileges() {
@@ -319,15 +322,16 @@
  * So far, this method is only used by Native Memory Tracking, which is
  * only supported on Windows XP or later.
  */
-address os::get_caller_pc(int n) {
+int os::get_native_stack(address* stack, int frames, int toSkip) {
 #ifdef _NMT_NOINLINE_
-  n++;
+  toSkip ++;
 #endif
-  address pc;
-  if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
-    return pc;
-  }
-  return NULL;
+  int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
+    (PVOID*)stack, NULL);
+  for (int index = captured; index < frames; index ++) {
+    stack[index] = NULL;
+  }
+  return captured;
 }
 
 
@@ -2901,7 +2905,7 @@
                                 PAGE_READWRITE);
   // If reservation failed, return NULL
   if (p_buf == NULL) return NULL;
-  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
+  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
   os::release_memory(p_buf, bytes + chunk_size);
 
   // we still need to round up to a page boundary (in case we are using large pages)
@@ -2967,7 +2971,7 @@
         // need to create a dummy 'reserve' record to match
         // the release.
         MemTracker::record_virtual_memory_reserve((address)p_buf,
-          bytes_to_release, mtNone, CALLER_PC);
+          bytes_to_release, CALLER_PC);
         os::release_memory(p_buf, bytes_to_release);
       }
 #ifdef ASSERT
@@ -2986,11 +2990,10 @@
   }
   // Although the memory is allocated individually, it is returned as one.
   // NMT records it as one block.
-  address pc = CALLER_PC;
   if ((flags & MEM_COMMIT) != 0) {
-    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
+    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
   } else {
-    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
+    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
   }
 
   // made it this far, success
@@ -3188,8 +3191,7 @@
     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
     if (res != NULL) {
-      address pc = CALLER_PC;
-      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
+      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
     }
 
     return res;
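
The Windows variant of get_native_stack() leans on RtlCaptureStackBackTrace,
which returns the number of frames it actually captured; the extra +1 in the
frames-to-skip argument hides get_native_stack's own frame, and the trailing
hash output parameter may be NULL when unused, as here. Its documented shape,
for reference:

    // winternl.h / ntdll:
    //   USHORT RtlCaptureStackBackTrace(ULONG  FramesToSkip,
    //                                   ULONG  FramesToCapture,
    //                                   PVOID* BackTrace,
    //                                   PULONG BackTraceHash);  // may be NULL
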
--- a/hotspot/src/os/windows/vm/perfMemory_windows.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os/windows/vm/perfMemory_windows.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "oops/oop.inline.hpp"
 #include "os_windows.inline.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/os.hpp"
 #include "runtime/perfMemory.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/exceptions.hpp"
@@ -1388,7 +1389,7 @@
   // the file has been successfully created and the file mapping
   // object has been created.
   sharedmem_fileHandle = fh;
-  sharedmem_fileName = strdup(filename);
+  sharedmem_fileName = os::strdup(filename);
 
   return fmh;
 }
@@ -1498,7 +1499,8 @@
   (void)memset(mapAddress, '\0', size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
+    size, CURRENT_PC, mtInternal);
 
   return (char*) mapAddress;
 }
@@ -1680,7 +1682,8 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size,
+    CURRENT_PC, mtInternal);
 
 
   *addrp = (char*)mapAddress;
@@ -1834,10 +1837,14 @@
     return;
   }
 
-  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  remove_file_mapping(addr);
-  // it does not go through os api, the operation has to record from here
-  tkr.record((address)addr, bytes);
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    // it does not go through os api, the operation has to record from here
+    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+    remove_file_mapping(addr);
+    tkr.record((address)addr, bytes);
+  } else {
+    remove_file_mapping(addr);
+  }
 }
 
 char* PerfMemory::backing_store_filename() {
--- a/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -23,6 +23,8 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_sparc.hpp"
 
@@ -48,7 +50,7 @@
   // All SI defines used below must be supported.
   guarantee(bufsize != -1, "must be supported");
 
-  char* buf = (char*) malloc(bufsize);
+  char* buf = (char*) os::malloc(bufsize, mtInternal);
 
   if (buf == NULL)
     return;
@@ -60,7 +62,7 @@
     }
   }
 
-  free(buf);
+  os::free(buf);
 }
 
 int VM_Version::platform_features(int features) {
@@ -161,7 +163,7 @@
 
     char   tmp;
     size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
-    char*  buf     = (char*) malloc(bufsize);
+    char*  buf     = (char*) os::malloc(bufsize, mtInternal);
 
     if (buf != NULL) {
       if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {
@@ -184,7 +186,7 @@
           if (vis[3] == '2')                     features |= vis2_instructions_m;
         }
       }
-      free(buf);
+      os::free(buf);
     }
   }
 
@@ -228,7 +230,7 @@
             }
 #endif
             // Convert to UPPER case before compare.
-            char* impl = strdup(implementation);
+            char* impl = os::strdup_check_oom(implementation);
 
             for (int i = 0; impl[i] != 0; i++)
               impl[i] = (char)toupper((uint)impl[i]);
@@ -252,7 +254,7 @@
                 implementation = "SPARC";
               }
             }
-            free((void*)impl);
+            os::free((void*)impl);
             break;
           }
         } // for(
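
Both sysinfo() call sites in this file use the classic two-call sizing idiom:
a first call with a one-byte buffer reports the size the data needs, and a
second call fills a buffer of that size. Switching to os::malloc/os::free
additionally makes the buffers visible to NMT under mtInternal. The idiom,
condensed as a sketch:

    char   tmp;
    size_t needed = sysinfo(SI_ISALIST, &tmp, 1);  // first call: size only
    char*  buf    = (char*) os::malloc(needed, mtInternal);
    if (buf != NULL) {
      if (sysinfo(SI_ISALIST, buf, needed) == needed) {
        // parse the instruction-set list in buf
      }
      os::free(buf);
    }
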
--- a/hotspot/src/share/vm/adlc/output_c.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/adlc/output_c.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -997,7 +997,7 @@
   int nopcnt = 0;
   for ( _pipeline->_noplist.reset(); (nop = _pipeline->_noplist.iter()) != NULL; nopcnt++ );
 
-  fprintf(fp_cpp, "void Bundle::initialize_nops(MachNode * nop_list[%d], Compile *C) {\n", nopcnt);
+  fprintf(fp_cpp, "void Bundle::initialize_nops(MachNode * nop_list[%d]) {\n", nopcnt);
   int i = 0;
   for ( _pipeline->_noplist.reset(); (nop = _pipeline->_noplist.iter()) != NULL; i++ ) {
     fprintf(fp_cpp, "  nop_list[%d] = (MachNode *) new %sNode();\n", i, nop);
@@ -1369,7 +1369,7 @@
         fprintf(fp, "        ra_->add_reference(root, inst%d);\n", inst_num);
         fprintf(fp, "        ra_->set_oop (root, ra_->is_oop(inst%d));\n", inst_num);
         fprintf(fp, "        ra_->set_pair(root->_idx, ra_->get_reg_second(inst%d), ra_->get_reg_first(inst%d));\n", inst_num, inst_num);
-        fprintf(fp, "        root->_opnds[0] = inst%d->_opnds[0]->clone(C); // result\n", inst_num);
+        fprintf(fp, "        root->_opnds[0] = inst%d->_opnds[0]->clone(); // result\n", inst_num);
         fprintf(fp, "        // ----- Done with initial setup -----\n");
       } else {
         if( (op_form == NULL) || (op_form->is_base_constant(globals) == Form::none) ) {
@@ -1382,7 +1382,7 @@
         } else {
           fprintf(fp, "        // no ideal edge for constants after matching\n");
         }
-        fprintf(fp, "        root->_opnds[%d] = inst%d->_opnds[%d]->clone(C);\n",
+        fprintf(fp, "        root->_opnds[%d] = inst%d->_opnds[%d]->clone();\n",
                 opnds_index, inst_num, inst_op_num );
       }
       ++opnds_index;
@@ -1402,7 +1402,7 @@
 // Define the Peephole method for an instruction node
 void ArchDesc::definePeephole(FILE *fp, InstructForm *node) {
   // Generate Peephole function header
-  fprintf(fp, "MachNode *%sNode::peephole( Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile* C ) {\n", node->_ident);
+  fprintf(fp, "MachNode *%sNode::peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted) {\n", node->_ident);
   fprintf(fp, "  bool  matches = true;\n");
 
   // Identify the maximum instruction position,
@@ -1593,7 +1593,7 @@
       }
 
       const char *resultOper = new_inst->reduce_result();
-      fprintf(fp,"  n%d->set_opnd_array(0, state->MachOperGenerator( %s, C ));\n",
+      fprintf(fp,"  n%d->set_opnd_array(0, state->MachOperGenerator(%s));\n",
               cnt, machOperEnum(resultOper));
 
       // get the formal operand NameList
@@ -1634,7 +1634,7 @@
           // If there is no use of the created operand, just skip it
           if (new_pos != NameList::Not_in_list) {
             //Copy the operand from the original made above
-            fprintf(fp,"  n%d->set_opnd_array(%d, op%d->clone(C)); // %s\n",
+            fprintf(fp,"  n%d->set_opnd_array(%d, op%d->clone()); // %s\n",
                     cnt, new_pos, exp_pos-node->num_opnds(), opid);
             // Check for who defines this operand & add edge if needed
             fprintf(fp,"  if(tmp%d != NULL)\n", exp_pos);
@@ -1662,7 +1662,7 @@
           new_pos = new_inst->operand_position(parameter,Component::USE);
           if (new_pos != -1) {
             // Copy the operand from the ExpandNode to the new node
-            fprintf(fp,"  n%d->set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
+            fprintf(fp,"  n%d->set_opnd_array(%d, opnd_array(%d)->clone()); // %s\n",
                     cnt, new_pos, exp_pos, opid);
             // For each operand add appropriate input edges by looking at tmp's
             fprintf(fp,"  if(tmp%d == this) {\n", exp_pos);
@@ -1729,14 +1729,14 @@
           declared_def = true;
         }
         if (op && op->_interface && op->_interface->is_RegInterface()) {
-          fprintf(fp,"  def = new MachTempNode(state->MachOperGenerator( %s, C ));\n",
+          fprintf(fp,"  def = new MachTempNode(state->MachOperGenerator(%s));\n",
                   machOperEnum(op->_ident));
           fprintf(fp,"  add_req(def);\n");
           // The operand for TEMP is already constructed during
           // this mach node construction, see buildMachNode().
           //
           // int idx  = node->operand_position_format(comp->_name);
-          // fprintf(fp,"  set_opnd_array(%d, state->MachOperGenerator( %s, C ));\n",
+          // fprintf(fp,"  set_opnd_array(%d, state->MachOperGenerator(%s));\n",
           //         idx, machOperEnum(op->_ident));
         } else {
           assert(false, "can't have temps which aren't registers");
@@ -1802,7 +1802,7 @@
         uint j = node->unique_opnds_idx(i);
         // unique_opnds_idx(i) is unique if unique_opnds_idx(j) is not unique.
         if( j != node->unique_opnds_idx(j) ) {
-          fprintf(fp,"  set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
+          fprintf(fp,"  set_opnd_array(%d, opnd_array(%d)->clone()); // %s\n",
                   new_num_opnds, i, comp->_name);
           // delete not unique edges here
           fprintf(fp,"  for(unsigned i = 0; i < num%d; i++) {\n", i);
@@ -2839,12 +2839,12 @@
 
 // generate code to create a clone for a class derived from MachOper
 //
-// (0)  MachOper  *MachOperXOper::clone(Compile* C) const {
+// (0)  MachOper  *MachOperXOper::clone() const {
 // (1)    return new MachXOper( _ccode, _c0, _c1, ..., _cn);
 // (2)  }
 //
 static void defineClone(FILE *fp, FormDict &globalNames, OperandForm &oper) {
-  fprintf(fp,"MachOper *%sOper::clone(Compile* C) const {\n", oper._ident);
+  fprintf(fp,"MachOper *%sOper::clone() const {\n", oper._ident);
   // Check for constants that need to be copied over
   const int  num_consts    = oper.num_consts(globalNames);
   const bool is_ideal_bool = oper.is_ideal_bool();
@@ -3043,7 +3043,7 @@
 static void define_fill_new_machnode(bool used, FILE *fp_cpp) {
   fprintf(fp_cpp, "\n");
   fprintf(fp_cpp, "// Copy _idx, inputs and operands to new node\n");
-  fprintf(fp_cpp, "void MachNode::fill_new_machnode( MachNode* node, Compile* C) const {\n");
+  fprintf(fp_cpp, "void MachNode::fill_new_machnode(MachNode* node) const {\n");
   if( !used ) {
     fprintf(fp_cpp, "  // This architecture does not have cisc or short branch instructions\n");
     fprintf(fp_cpp, "  ShouldNotCallThis();\n");
@@ -3064,7 +3064,7 @@
     fprintf(fp_cpp, "  MachOper **to = node->_opnds;\n");
     fprintf(fp_cpp, "  for( int i = 0; i < nopnds; i++ ) {\n");
     fprintf(fp_cpp, "    if( i != cisc_operand() ) \n");
-    fprintf(fp_cpp, "      to[i] = _opnds[i]->clone(C);\n");
+    fprintf(fp_cpp, "      to[i] = _opnds[i]->clone();\n");
     fprintf(fp_cpp, "  }\n");
     fprintf(fp_cpp, "}\n");
   }
@@ -3105,7 +3105,7 @@
     if ( strcmp(oper->_ident,"label") == 0 ) {
       defineIn_RegMask(_CPP_MISC_file._fp, _globalNames, *oper);
 
-      fprintf(fp,"MachOper  *%sOper::clone(Compile* C) const {\n", oper->_ident);
+      fprintf(fp,"MachOper  *%sOper::clone() const {\n", oper->_ident);
       fprintf(fp,"  return  new %sOper(_label, _block_num);\n", oper->_ident);
       fprintf(fp,"}\n");
 
@@ -3124,7 +3124,7 @@
     if ( strcmp(oper->_ident,"method") == 0 ) {
       defineIn_RegMask(_CPP_MISC_file._fp, _globalNames, *oper);
 
-      fprintf(fp,"MachOper  *%sOper::clone(Compile* C) const {\n", oper->_ident);
+      fprintf(fp,"MachOper  *%sOper::clone() const {\n", oper->_ident);
       fprintf(fp,"  return  new %sOper(_method);\n", oper->_ident);
       fprintf(fp,"}\n");
 
@@ -3845,7 +3845,7 @@
           "// that invokes 'new' on the corresponding class constructor.\n");
   fprintf(fp_cpp, "\n");
   fprintf(fp_cpp, "MachOper *State::MachOperGenerator");
-  fprintf(fp_cpp, "(int opcode, Compile* C)");
+  fprintf(fp_cpp, "(int opcode)");
   fprintf(fp_cpp, "{\n");
   fprintf(fp_cpp, "\n");
   fprintf(fp_cpp, "  switch(opcode) {\n");
@@ -3921,7 +3921,7 @@
       int         index  = clist.operand_position(comp->_name, comp->_usedef, inst);
       const char *opcode = machOperEnum(comp->_type);
       fprintf(fp_cpp, "%s node->set_opnd_array(%d, ", indent, index);
-      fprintf(fp_cpp, "MachOperGenerator(%s, C));\n", opcode);
+      fprintf(fp_cpp, "MachOperGenerator(%s));\n", opcode);
       }
   }
   else if ( inst->is_chain_of_constant(_globalNames, opType) ) {
@@ -3978,7 +3978,7 @@
     InstructForm *inst_cisc = cisc_spill_alternate();
     if (inst_cisc != NULL) {
       fprintf(fp_hpp, "  virtual int            cisc_operand() const { return %d; }\n", cisc_spill_operand());
-      fprintf(fp_hpp, "  virtual MachNode      *cisc_version(int offset, Compile* C);\n");
+      fprintf(fp_hpp, "  virtual MachNode      *cisc_version(int offset);\n");
       fprintf(fp_hpp, "  virtual void           use_cisc_RegMask();\n");
       fprintf(fp_hpp, "  virtual const RegMask *cisc_RegMask() const { return _cisc_RegMask; }\n");
     }
@@ -4008,7 +4008,7 @@
     // Construct CISC version of this instruction
     fprintf(fp_cpp, "\n");
     fprintf(fp_cpp, "// Build CISC version of this instruction\n");
-    fprintf(fp_cpp, "MachNode *%sNode::cisc_version( int offset, Compile* C ) {\n", this->_ident);
+    fprintf(fp_cpp, "MachNode *%sNode::cisc_version(int offset) {\n", this->_ident);
     // Create the MachNode object
     fprintf(fp_cpp, "  %sNode *node = new %sNode();\n", name, name);
     // Fill in the bottom_type where requested
@@ -4023,7 +4023,7 @@
 
     fprintf(fp_cpp, "\n");
     fprintf(fp_cpp, "  // Copy _idx, inputs and operands to new node\n");
-    fprintf(fp_cpp, "  fill_new_machnode(node, C);\n");
+    fprintf(fp_cpp, "  fill_new_machnode(node);\n");
     // Construct operand to access [stack_pointer + offset]
     fprintf(fp_cpp, "  // Construct operand to access [stack_pointer + offset]\n");
     fprintf(fp_cpp, "  node->set_opnd_array(cisc_operand(), new %sOper(offset));\n", cisc_oper_name);
@@ -4042,7 +4042,7 @@
 // Build prototypes for short branch methods
 void InstructForm::declare_short_branch_methods(FILE *fp_hpp) {
   if (has_short_branch_form()) {
-    fprintf(fp_hpp, "  virtual MachNode      *short_branch_version(Compile* C);\n");
+    fprintf(fp_hpp, "  virtual MachNode      *short_branch_version();\n");
   }
 }
 
@@ -4055,7 +4055,7 @@
 
     // Construct short_branch_version() method.
     fprintf(fp_cpp, "// Build short branch version of this instruction\n");
-    fprintf(fp_cpp, "MachNode *%sNode::short_branch_version(Compile* C) {\n", this->_ident);
+    fprintf(fp_cpp, "MachNode *%sNode::short_branch_version() {\n", this->_ident);
     // Create the MachNode object
     fprintf(fp_cpp, "  %sNode *node = new %sNode();\n", name, name);
     if( is_ideal_if() ) {
@@ -4071,7 +4071,7 @@
     // Short branch version must use same node index for access
     // through allocator's tables
     fprintf(fp_cpp, "  // Copy _idx, inputs and operands to new node\n");
-    fprintf(fp_cpp, "  fill_new_machnode(node, C);\n");
+    fprintf(fp_cpp, "  fill_new_machnode(node);\n");
 
     // Return result and exit scope
     fprintf(fp_cpp, "  return node;\n");
@@ -4097,7 +4097,7 @@
           "// that invokes 'new' on the corresponding class constructor.\n");
   fprintf(fp_cpp, "\n");
   fprintf(fp_cpp, "MachNode *State::MachNodeGenerator");
-  fprintf(fp_cpp, "(int opcode, Compile* C)");
+  fprintf(fp_cpp, "(int opcode)");
   fprintf(fp_cpp, "{\n");
   fprintf(fp_cpp, "  switch(opcode) {\n");
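
Every hunk in this file edits the generator, so the visible effect is in the
emitted ad_<arch> sources: the Compile* parameter disappears from the
generated clone/peephole/MachOperGenerator/MachNodeGenerator signatures. The
generated bodies already allocate with a plain 'new' (see the defineClone
template above), so the parameter carried no information; presumably
allocation now routes through the thread-current compilation instead. What a
generated operand class looks like after the change, as a sketch (the
'eRegIOper' name is illustrative):

    class MachOper {
    public:
      virtual MachOper* clone() const = 0;  // previously: clone(Compile* C)
    };

    class eRegIOper : public MachOper {
    public:
      virtual MachOper* clone() const { return new eRegIOper(); }
    };
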
 
--- a/hotspot/src/share/vm/adlc/output_h.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/adlc/output_h.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1119,7 +1119,7 @@
   fprintf(fp_hpp, "    _nop_count = %d\n",
     _pipeline->_nopcnt);
   fprintf(fp_hpp, "  };\n\n");
-  fprintf(fp_hpp, "  static void initialize_nops(MachNode *nop_list[%d], Compile* C);\n\n",
+  fprintf(fp_hpp, "  static void initialize_nops(MachNode *nop_list[%d]);\n\n",
     _pipeline->_nopcnt);
   fprintf(fp_hpp, "#ifndef PRODUCT\n");
   fprintf(fp_hpp, "  void dump(outputStream *st = tty) const;\n");
@@ -1240,7 +1240,7 @@
                       constant_type, _globalNames);
 
     // Clone function
-    fprintf(fp,"  virtual MachOper      *clone(Compile* C) const;\n");
+    fprintf(fp,"  virtual MachOper      *clone() const;\n");
 
     // Support setting a spill offset into a constant operand.
     // We only support setting an 'int' offset, while in the
@@ -1718,7 +1718,7 @@
 
     // If there is an explicit peephole rule, build it
     if ( instr->peepholes() != NULL ) {
-      fprintf(fp,"  virtual MachNode      *peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile *C);\n");
+      fprintf(fp,"  virtual MachNode      *peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted);\n");
     }
 
     // Output the declaration for number of relocation entries
@@ -1863,7 +1863,7 @@
     }
     if ( instr->num_post_match_opnds() != 0
          || instr->is_chain_of_constant(_globalNames) ) {
-      fprintf(fp,"  friend MachNode *State::MachNodeGenerator(int opcode, Compile* C);\n");
+      fprintf(fp,"  friend MachNode *State::MachNodeGenerator(int opcode);\n");
     }
     if ( instr->rematerialize(_globalNames, get_registers()) ) {
       fprintf(fp,"  // Rematerialize %s\n", instr->_ident);
@@ -2071,8 +2071,8 @@
   fprintf(fp,"  DEBUG_ONLY( ~State(void); )       // Destructor\n");
   fprintf(fp,"\n");
   fprintf(fp,"  // Methods created by ADLC and invoked by Reduce\n");
-  fprintf(fp,"  MachOper *MachOperGenerator( int opcode, Compile* C );\n");
-  fprintf(fp,"  MachNode *MachNodeGenerator( int opcode, Compile* C );\n");
+  fprintf(fp,"  MachOper *MachOperGenerator(int opcode);\n");
+  fprintf(fp,"  MachNode *MachNodeGenerator(int opcode);\n");
   fprintf(fp,"\n");
   fprintf(fp,"  // Assign a state to a node, definition of method produced by ADLC\n");
   fprintf(fp,"  bool DFA( int opcode, const Node *ideal );\n");
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -269,7 +269,7 @@
 
 GrowableArray<int>* CodeBuffer::create_patch_overflow() {
   if (_overflow_arena == NULL) {
-    _overflow_arena = new (mtCode) Arena();
+    _overflow_arena = new (mtCode) Arena(mtCode);
   }
   return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
 }
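
With the NMT rework this merge brings in, Arena's constructor takes a MEMFLAGS
tag, so the two mtCode occurrences serve different allocations. A sketch of
the distinction, assuming that split:

    // 'new (mtCode)' tags the Arena object's own C-heap allocation;
    // 'Arena(mtCode)' tags the chunks the arena later hands out.
    Arena* overflow = new (mtCode) Arena(mtCode);
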
--- a/hotspot/src/share/vm/c1/c1_Compiler.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compiler.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -48,7 +48,7 @@
 
 void Compiler::init_c1_runtime() {
   BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
-  Arena* arena = new (mtCompiler) Arena();
+  Arena* arena = new (mtCompiler) Arena(mtCompiler);
   Runtime1::initialize(buffer_blob);
   FrameMap::initialize();
   // initialize data structures
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_ValueStack.hpp"
 #include "ci/ciInstance.hpp"
+#include "runtime/os.hpp"
 
 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
   // we must have enough patching space so that call can be inserted
@@ -848,7 +849,7 @@
           stringStream st;
           st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
 #ifdef SPARC
-          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
+          _masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
 #else
           _masm->verify_oop(r->as_Register());
 #endif
--- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1613,25 +1613,22 @@
   Interval* precolored_cpu_intervals, *not_precolored_cpu_intervals;
   Interval* precolored_fpu_intervals, *not_precolored_fpu_intervals;
 
-  create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals, is_precolored_cpu_interval, is_virtual_cpu_interval);
-  if (has_fpu_registers()) {
-    create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
-#ifdef ASSERT
-  } else {
-    // fpu register allocation is omitted because no virtual fpu registers are present
-    // just check this again...
-    create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
-    assert(not_precolored_fpu_intervals == Interval::end(), "missed an uncolored fpu interval");
-#endif
-  }
-
   // allocate cpu registers
+  create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals,
+                         is_precolored_cpu_interval, is_virtual_cpu_interval);
+
+  // allocate fpu registers
+  create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals,
+                         is_precolored_fpu_interval, is_virtual_fpu_interval);
+
+  // The fpu interval lists must be created here, before cpu_lsw.walk() runs,
+  // because the walk changes interval positions; they cannot be moved down
+  // into the fpu allocation section below.
+
   LinearScanWalker cpu_lsw(this, precolored_cpu_intervals, not_precolored_cpu_intervals);
   cpu_lsw.walk();
   cpu_lsw.finish_allocation();
 
   if (has_fpu_registers()) {
-    // allocate fpu registers
     LinearScanWalker fpu_lsw(this, precolored_fpu_intervals, not_precolored_fpu_intervals);
     fpu_lsw.walk();
     fpu_lsw.finish_allocation();
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -86,7 +86,8 @@
 
 // ------------------------------------------------------------------
 // ciEnv::ciEnv
-ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
+ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter)
+  : _ciEnv_arena(mtCompiler) {
   VM_ENTRY_MARK;
 
   // Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
@@ -144,7 +145,7 @@
   _jvmti_can_pop_frame = false;
 }
 
-ciEnv::ciEnv(Arena* arena) {
+ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
   ASSERT_IN_VM;
 
   // Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -112,7 +112,7 @@
   // This Arena is long lived and exists in the resource mark of the
   // compiler thread that initializes the initial ciObjectFactory which
   // creates the shared ciObjects that all later ciObjectFactories use.
-  Arena* arena = new (mtCompiler) Arena();
+  Arena* arena = new (mtCompiler) Arena(mtCompiler);
   ciEnv initial(arena);
   ciEnv* env = ciEnv::current();
   env->_factory->init_shared_objects();
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -273,13 +273,17 @@
 }
 
 LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
-  _path = strdup(path);
+  _path = os::strdup_check_oom(path);
   _st = *st;
   _meta_index = NULL;
   _resolved_entry = NULL;
   _has_error = false;
 }
 
+LazyClassPathEntry::~LazyClassPathEntry() {
+  os::free(_path);
+}
+
 bool LazyClassPathEntry::is_jar_file() {
   return ((_st.st_mode & S_IFREG) == S_IFREG);
 }
@@ -416,7 +420,7 @@
         default:
         {
           if (!skipCurrentJar && cur_entry != NULL) {
-            char* new_name = strdup(package_name);
+            char* new_name = os::strdup_check_oom(package_name);
             boot_class_path_packages.append(new_name);
           }
         }
@@ -438,7 +442,7 @@
 
 void ClassLoader::setup_bootstrap_search_path() {
   assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
-  char* sys_class_path = os::strdup(Arguments::get_sysclasspath());
+  char* sys_class_path = os::strdup_check_oom(Arguments::get_sysclasspath());
   if (TraceClassLoading && Verbose) {
     tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
   }
@@ -460,6 +464,7 @@
       end++;
     }
   }
+  os::free(sys_class_path);
 }
 
 ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
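
The classLoader.cpp hunks above (and the compilerOracle.cpp ones further down) replace raw strdup with os::strdup_check_oom and pair every duplicate with an os::free, adding destructors where needed. A minimal sketch of that contract, assuming strdup_check_oom behaves like strdup except that it terminates the VM on allocation failure instead of returning NULL; the helper and class below are illustrative only:

// Sketch: a strdup variant that never returns NULL, paired with explicit
// ownership -- duplicate in the constructor, free in the destructor.
#include <cstdio>
#include <cstdlib>
#include <cstring>

static char* strdup_check_oom(const char* s) {
  size_t n = strlen(s) + 1;
  char* copy = (char*)malloc(n);
  if (copy == NULL) {
    fprintf(stderr, "native out of memory duplicating a string\n");
    abort();                       // stands in for the VM's fatal-error path
  }
  memcpy(copy, s, n);
  return copy;
}

class PathEntry {                  // mirrors the LazyClassPathEntry pattern
  char* _path;
 public:
  explicit PathEntry(const char* path) : _path(strdup_check_oom(path)) {}
  ~PathEntry() { free(_path); }    // the owner frees what it duplicated
  const char* path() const { return _path; }
};

int main() {
  PathEntry entry("/tmp/classes");
  printf("%s\n", entry.path());
  return 0;
}
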
--- a/hotspot/src/share/vm/classfile/classLoader.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -128,6 +128,8 @@
   bool is_jar_file();
   const char* name()  { return _path; }
   LazyClassPathEntry(char* path, const struct stat* st);
+  virtual ~LazyClassPathEntry();
+
   ClassFileStream* open_stream(const char* name, TRAPS);
   void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
   virtual bool is_lazy();
--- a/hotspot/src/share/vm/classfile/stackMapFrame.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/classfile/stackMapFrame.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,21 +54,6 @@
   return frame;
 }
 
-bool StackMapFrame::has_new_object() const {
-  int32_t i;
-  for (i = 0; i < _max_locals; i++) {
-    if (_locals[i].is_uninitialized()) {
-      return true;
-    }
-  }
-  for (i = 0; i < _stack_size; i++) {
-    if (_stack[i].is_uninitialized()) {
-      return true;
-    }
-  }
-  return false;
-}
-
 void StackMapFrame::initialize_object(
     VerificationType old_object, VerificationType new_object) {
   int32_t i;
--- a/hotspot/src/share/vm/classfile/stackMapFrame.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/classfile/stackMapFrame.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -155,10 +155,6 @@
     const methodHandle m, VerificationType thisKlass, TRAPS);
 
   // Search local variable type array and stack type array.
-  // Return true if an uninitialized object is found.
-  bool has_new_object() const;
-
-  // Search local variable type array and stack type array.
   // Set every element with type of old_object to new_object.
   void initialize_object(
     VerificationType old_object, VerificationType new_object);
--- a/hotspot/src/share/vm/classfile/stackMapTable.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/classfile/stackMapTable.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -70,24 +70,26 @@
 
 bool StackMapTable::match_stackmap(
     StackMapFrame* frame, int32_t target,
-    bool match, bool update, ErrorContext* ctx, TRAPS) const {
+    bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const {
   int index = get_index_from_offset(target);
-  return match_stackmap(frame, target, index, match, update, ctx, THREAD);
+  return match_stackmap(frame, target, index, match, update, handler, ctx, THREAD);
 }
 
 // Match and/or update current_frame to the frame in stackmap table with
 // specified offset and frame index. Return true if the two frames match.
+// handler is true if the frame in stackmap_table is for an exception handler.
 //
-// The values of match and update are:                  _match__update_
+// The values of match, update, and handler are:        _match__update__handler
 //
-// checking a branch target/exception handler:           true   false
+// checking a branch target:                             true   false   false
+// checking an exception handler:                        true   false   true
 // linear bytecode verification following an
-// unconditional branch:                                 false  true
+// unconditional branch:                                 false  true    false
 // linear bytecode verification not following an
-// unconditional branch:                                 true   true
+// unconditional branch:                                 true   true    false
 bool StackMapTable::match_stackmap(
     StackMapFrame* frame, int32_t target, int32_t frame_index,
-    bool match, bool update, ErrorContext* ctx, TRAPS) const {
+    bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const {
   if (frame_index < 0 || frame_index >= _frame_count) {
     *ctx = ErrorContext::missing_stackmap(frame->offset());
     frame->verifier()->verify_error(
@@ -98,11 +100,9 @@
   StackMapFrame *stackmap_frame = _frame_array[frame_index];
   bool result = true;
   if (match) {
-    // when checking handler target, match == true && update == false
-    bool is_exception_handler = !update;
     // Has direct control flow from last instruction, need to match the two
     // frames.
-    result = frame->is_assignable_to(stackmap_frame, is_exception_handler,
+    result = frame->is_assignable_to(stackmap_frame, handler,
         ctx, CHECK_VERIFY_(frame->verifier(), result));
   }
   if (update) {
@@ -126,24 +126,10 @@
     StackMapFrame* frame, int32_t target, TRAPS) const {
   ErrorContext ctx;
   bool match = match_stackmap(
-    frame, target, true, false, &ctx, CHECK_VERIFY(frame->verifier()));
+    frame, target, true, false, false, &ctx, CHECK_VERIFY(frame->verifier()));
   if (!match || (target < 0 || target >= _code_length)) {
     frame->verifier()->verify_error(ctx,
         "Inconsistent stackmap frames at branch target %d", target);
-    return;
-  }
-  // check if uninitialized objects exist on backward branches
-  check_new_object(frame, target, CHECK_VERIFY(frame->verifier()));
-  frame->verifier()->update_furthest_jump(target);
-}
-
-void StackMapTable::check_new_object(
-    const StackMapFrame* frame, int32_t target, TRAPS) const {
-  if (frame->offset() > target && frame->has_new_object()) {
-    frame->verifier()->verify_error(
-        ErrorContext::bad_code(frame->offset()),
-        "Uninitialized object exists on backward branch %d", target);
-    return;
   }
 }
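
match_stackmap() now threads a third boolean through every call site. The flag combinations from the table above, restated as a tiny self-contained sketch (the enum and helper are illustrative names, not HotSpot code):

// Illustrative only: the three flag combinations match_stackmap() is called
// with, per the table in stackMapTable.cpp.
#include <cstdio>

struct StackmapFlags { bool match; bool update; bool handler; };

enum Situation { BRANCH_TARGET, EXCEPTION_HANDLER, AFTER_GOTO, FALL_THROUGH };

static StackmapFlags flags_for(Situation s) {
  switch (s) {
    case BRANCH_TARGET:     return { true,  false, false };
    case EXCEPTION_HANDLER: return { true,  false, true  };  // new case
    case AFTER_GOTO:        return { false, true,  false };
    default:                return { true,  true,  false };  // fall through
  }
}

int main() {
  StackmapFlags f = flags_for(EXCEPTION_HANDLER);
  printf("match=%d update=%d handler=%d\n", f.match, f.update, f.handler);
  return 0;
}

Carrying the handler bit explicitly replaces the earlier inference (handler == match && !update, the removed "is_exception_handler = !update" line), which stopped holding once handlers and branch targets needed to be distinguished.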
 
--- a/hotspot/src/share/vm/classfile/stackMapTable.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/classfile/stackMapTable.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -60,12 +60,12 @@
   // specified offset. Return true if the two frames match.
   bool match_stackmap(
     StackMapFrame* current_frame, int32_t offset,
-    bool match, bool update, ErrorContext* ctx, TRAPS) const;
+    bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const;
   // Match and/or update current_frame to the frame in stackmap table with
   // specified offset and frame index. Return true if the two frames match.
   bool match_stackmap(
     StackMapFrame* current_frame, int32_t offset, int32_t frame_index,
-    bool match, bool update, ErrorContext* ctx, TRAPS) const;
+    bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const;
 
   // Check jump instructions. Make sure there are no uninitialized
   // instances on backward branch.
@@ -76,10 +76,6 @@
   // Returns the frame array index where the frame with offset is stored.
   int get_index_from_offset(int32_t offset) const;
 
-  // Make sure that there's no uninitialized object exist on backward branch.
-  void check_new_object(
-    const StackMapFrame* frame, int32_t target, TRAPS) const;
-
   void print_on(outputStream* str) const;
 };
 
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -70,9 +70,9 @@
 void SymbolTable::initialize_symbols(int arena_alloc_size) {
   // Initialize the arena for global symbols, size passed in depends on CDS.
   if (arena_alloc_size == 0) {
-    _arena = new (mtSymbol) Arena();
+    _arena = new (mtSymbol) Arena(mtSymbol);
   } else {
-    _arena = new (mtSymbol) Arena(arena_alloc_size);
+    _arena = new (mtSymbol) Arena(mtSymbol, arena_alloc_size);
   }
 }
 
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -620,8 +620,6 @@
                                 // flow from current instruction to the next
                                 // instruction in sequence
 
-  set_furthest_jump(0);
-
   Bytecodes::Code opcode;
   while (!bcs.is_last_bytecode()) {
     // Check for recursive re-verification before each bytecode.
@@ -1780,7 +1778,7 @@
       // If matched, current_frame will be updated by this method.
       bool matches = stackmap_table->match_stackmap(
         current_frame, this_offset, stackmap_index,
-        !no_control_flow, true, &ctx, CHECK_VERIFY_(this, 0));
+        !no_control_flow, true, false, &ctx, CHECK_VERIFY_(this, 0));
       if (!matches) {
         // report type error
         verify_error(ctx, "Instruction type does not match stack map");
@@ -1827,7 +1825,7 @@
       }
       ErrorContext ctx;
       bool matches = stackmap_table->match_stackmap(
-        new_frame, handler_pc, true, false, &ctx, CHECK_VERIFY(this));
+        new_frame, handler_pc, true, false, true, &ctx, CHECK_VERIFY(this));
       if (!matches) {
         verify_error(ctx, "Stack map does not match the one at "
             "exception handler %d", handler_pc);
@@ -2219,6 +2217,181 @@
   }
 }
 
+// Look at the method's handlers.  If the bci is in the handler's try block
+// then check if the handler_pc is already on the stack.  If not, push it.
+void ClassVerifier::push_handlers(ExceptionTable* exhandlers,
+                                  GrowableArray<u4>* handler_stack,
+                                  u4 bci) {
+  int exlength = exhandlers->length();
+  for(int x = 0; x < exlength; x++) {
+    if (bci >= exhandlers->start_pc(x) && bci < exhandlers->end_pc(x)) {
+      handler_stack->append_if_missing(exhandlers->handler_pc(x));
+    }
+  }
+}
+
+// Return TRUE if every code path starting at start_bc_offset either ends in
+// an athrow bytecode or loops.
+bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
+  ResourceMark rm;
+  // Create bytecode stream.
+  RawBytecodeStream bcs(method());
+  u4 code_length = method()->code_size();
+  bcs.set_start(start_bc_offset);
+  u4 target;
+  // Create stack for storing bytecode start offsets for if* and *switch.
+  GrowableArray<u4>* bci_stack = new GrowableArray<u4>(30);
+  // Create stack for handlers for try blocks containing this handler.
+  GrowableArray<u4>* handler_stack = new GrowableArray<u4>(30);
+  // Create list of visited branch opcodes (goto* and if*).
+  GrowableArray<u4>* visited_branches = new GrowableArray<u4>(30);
+  ExceptionTable exhandlers(_method());
+
+  while (true) {
+    if (bcs.is_last_bytecode()) {
+      // If there are no more starting offsets to parse, or we are at the end
+      // of the method, then return false.
+      if ((bci_stack->is_empty()) || ((u4)bcs.end_bci() == code_length))
+        return false;
+      // Pop a bytecode starting offset and scan from there.
+      bcs.set_start(bci_stack->pop());
+    }
+    Bytecodes::Code opcode = bcs.raw_next();
+    u4 bci = bcs.bci();
+
+    // If the bytecode is in a TRY block, push its handlers so they
+    // will get parsed.
+    push_handlers(&exhandlers, handler_stack, bci);
+
+    switch (opcode) {
+      case Bytecodes::_if_icmpeq:
+      case Bytecodes::_if_icmpne:
+      case Bytecodes::_if_icmplt:
+      case Bytecodes::_if_icmpge:
+      case Bytecodes::_if_icmpgt:
+      case Bytecodes::_if_icmple:
+      case Bytecodes::_ifeq:
+      case Bytecodes::_ifne:
+      case Bytecodes::_iflt:
+      case Bytecodes::_ifge:
+      case Bytecodes::_ifgt:
+      case Bytecodes::_ifle:
+      case Bytecodes::_if_acmpeq:
+      case Bytecodes::_if_acmpne:
+      case Bytecodes::_ifnull:
+      case Bytecodes::_ifnonnull:
+        target = bcs.dest();
+        if (visited_branches->contains(bci)) {
+          if (bci_stack->is_empty()) return true;
+          // Pop a bytecode starting offset and scan from there.
+          bcs.set_start(bci_stack->pop());
+        } else {
+          if (target > bci) { // forward branch
+            if (target >= code_length) return false;
+            // Push the branch target onto the stack.
+            bci_stack->push(target);
+            // Then scan the bytecodes starting at the next instruction.
+            bcs.set_start(bcs.next_bci());
+          } else { // backward branch
+            // Push bytecode offset following backward branch onto the stack.
+            bci_stack->push(bcs.next_bci());
+            // Check bytecodes starting with branch target.
+            bcs.set_start(target);
+          }
+          // Record target so we don't branch here again.
+          visited_branches->append(bci);
+        }
+        break;
+
+      case Bytecodes::_goto:
+      case Bytecodes::_goto_w:
+        target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w());
+        if (visited_branches->contains(bci)) {
+          if (bci_stack->is_empty()) return true;
+          // Been here before, pop new starting offset from stack.
+          bcs.set_start(bci_stack->pop());
+        } else {
+          if (target >= code_length) return false;
+          // Continue scanning from the target onward.
+          bcs.set_start(target);
+          // Record target so we don't branch here again.
+          visited_branches->append(bci);
+        }
+        break;
+
+      // Check that all switch alternatives end in 'athrow' bytecodes. Since it
+      // is difficult to determine where each switch alternative ends, parse
+      // each alternative until we either hit a 'return' or 'athrow' bytecode,
+      // or reach the end of the method's bytecodes.  This is gross but should
+      // be okay because:
+      // 1. tableswitch and lookupswitch bytecodes in handlers for explicit
+      //    constructor invocations should be rare.
+      // 2. if each switch alternative ends in an athrow then the parsing should
+      //    be short.  If there is no athrow then it is bogus code, anyway.
+      case Bytecodes::_lookupswitch:
+      case Bytecodes::_tableswitch:
+        {
+          address aligned_bcp = (address) round_to((intptr_t)(bcs.bcp() + 1), jintSize);
+          u4 default_offset = Bytes::get_Java_u4(aligned_bcp) + bci;
+          int keys, delta;
+          if (opcode == Bytecodes::_tableswitch) {
+            jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
+            jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
+            // This is invalid, but let the regular bytecode verifier
+            // report this because the user will get a better error message.
+            if (low > high) return true;
+            keys = high - low + 1;
+            delta = 1;
+          } else {
+            keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
+            delta = 2;
+          }
+          // Invalid, let the regular bytecode verifier deal with it.
+          if (keys < 0) return true;
+
+          // Push the offset of the next bytecode onto the stack.
+          bci_stack->push(bcs.next_bci());
+
+          // Push the switch alternatives onto the stack.
+          for (int i = 0; i < keys; i++) {
+            u4 target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
+            if (target > code_length) return false;
+            bci_stack->push(target);
+          }
+
+          // Start bytecode parsing for the switch at the default alternative.
+          if (default_offset > code_length) return false;
+          bcs.set_start(default_offset);
+          break;
+        }
+
+      case Bytecodes::_return:
+        return false;
+
+      case Bytecodes::_athrow:
+        {
+          if (bci_stack->is_empty()) {
+            if (handler_stack->is_empty()) {
+              return true;
+            } else {
+              // Parse the catch handlers for try blocks containing athrow.
+              bcs.set_start(handler_stack->pop());
+            }
+          } else {
+            // Pop a bytecode offset and start scanning from there.
+            bcs.set_start(bci_stack->pop());
+          }
+        }
+        break;
+
+      default:
+        ;
+    } // end switch
+  } // end while loop
+
+  return false;
+}
+
 void ClassVerifier::verify_invoke_init(
     RawBytecodeStream* bcs, u2 ref_class_index, VerificationType ref_class_type,
     StackMapFrame* current_frame, u4 code_length, bool *this_uninit,
@@ -2238,25 +2411,26 @@
       return;
     }
 
-    // Make sure that this call is not jumped over.
-    if (bci < furthest_jump()) {
-      verify_error(ErrorContext::bad_code(bci),
-                   "Bad <init> method call from inside of a branch");
-      return;
-    }
-
-    // Make sure that this call is not done from within a TRY block because
-    // that can result in returning an incomplete object.  Simply checking
-    // (bci >= start_pc) also ensures that this call is not done after a TRY
-    // block.  That is also illegal because this call must be the first Java
-    // statement in the constructor.
+    // Check if this call is done from inside of a TRY block.  If so, make
+    // sure that all catch clause paths end in a throw.  Otherwise, this
+    // can result in returning an incomplete object.
     ExceptionTable exhandlers(_method());
     int exlength = exhandlers.length();
     for(int i = 0; i < exlength; i++) {
-      if (bci >= exhandlers.start_pc(i)) {
-        verify_error(ErrorContext::bad_code(bci),
-                     "Bad <init> method call from after the start of a try block");
-        return;
+      u2 start_pc = exhandlers.start_pc(i);
+      u2 end_pc = exhandlers.end_pc(i);
+
+      if (bci >= start_pc && bci < end_pc) {
+        if (!ends_in_athrow(exhandlers.handler_pc(i))) {
+          verify_error(ErrorContext::bad_code(bci),
+            "Bad <init> method call from after the start of a try block");
+          return;
+        } else if (VerboseVerification) {
+          ResourceMark rm;
+          tty->print_cr("Survived call to ends_in_athrow(): %s",
+                        current_class()->name()->as_C_string());
+        }
       }
     }
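
The new ends_in_athrow() scan is easier to follow on a toy instruction set. The sketch below is a deliberate simplification, with an invented Insn type and no switch bytecodes or exception table: it keeps a worklist of scan start offsets, treats a revisited branch as a loop that ends the current path successfully, and fails as soon as any path returns or runs off the end of the code.

#include <cstdio>
#include <set>
#include <vector>

enum OpKind { SIMPLE, GOTO, IF, RETURN, THROW };
struct Insn { OpKind kind; int target; };     // target used by GOTO and IF

// Returns true if every path from 'start' ends in THROW or loops forever.
static bool ends_in_throw(const std::vector<Insn>& code, int start) {
  std::vector<int> worklist;                  // pending scan start offsets
  worklist.push_back(start);
  std::set<int> visited;                      // branch offsets already taken
  while (!worklist.empty()) {
    int pc = worklist.back();
    worklist.pop_back();
    bool path_done = false;
    while (!path_done) {
      if (pc < 0 || pc >= (int)code.size()) return false;  // ran off the end
      const Insn& in = code[pc];
      switch (in.kind) {
        case RETURN:
          return false;                       // a path completes normally
        case THROW:
          path_done = true;                   // this path ends in a throw
          break;
        case GOTO:
          if (!visited.insert(pc).second) {
            path_done = true;                 // revisited: a loop, acceptable
          } else {
            pc = in.target;                   // follow the goto
          }
          break;
        case IF:
          if (!visited.insert(pc).second) {
            path_done = true;                 // revisited: a loop, acceptable
          } else {
            worklist.push_back(in.target);    // scan the taken arm later
            pc++;                             // continue down the fall-through
          }
          break;
        default:                              // SIMPLE: fall through
          pc++;
          break;
      }
    }
  }
  return true;      // every scanned path ended in THROW or looped
}

int main() {
  // 0: IF -> 2   1: THROW   2: THROW   (all paths end in a throw)
  std::vector<Insn> code = { {IF, 2}, {THROW, 0}, {THROW, 0} };
  printf("%d\n", ends_in_throw(code, 0));     // prints 1
  return 0;
}

The real routine additionally pushes the exception handlers covering each scanned bytecode and every switch alternative onto the same worklist.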
 
--- a/hotspot/src/share/vm/classfile/verifier.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
 #include "oops/klass.hpp"
 #include "oops/method.hpp"
 #include "runtime/handles.hpp"
+#include "utilities/growableArray.hpp"
 #include "utilities/exceptions.hpp"
 
 // The verifier class
@@ -258,9 +259,6 @@
 
   ErrorContext _error_context;  // contains information about an error
 
-  // Used to detect illegal jumps over calls to super() nd this() in ctors.
-  int32_t _furthest_jump;
-
   void verify_method(methodHandle method, TRAPS);
   char* generate_code_data(methodHandle m, u4 code_length, TRAPS);
   void verify_exception_handler_table(u4 code_length, char* code_data,
@@ -306,6 +304,16 @@
     StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
     constantPoolHandle cp, TRAPS);
 
+  // Used by ends_in_athrow() to push all handlers that contain bci onto
+  // the handler_stack, if the handler is not already on the stack.
+  void push_handlers(ExceptionTable* exhandlers,
+                     GrowableArray<u4>* handler_stack,
+                     u4 bci);
+
+  // Returns true if every path starting at start_bc_offset either ends in
+  // an athrow bytecode or loops.
+  bool ends_in_athrow(u4 start_bc_offset);
+
   void verify_invoke_instructions(
     RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
     bool* this_uninit, VerificationType return_type,
@@ -407,19 +415,6 @@
 
   TypeOrigin ref_ctx(const char* str, TRAPS);
 
-  // Keep track of the furthest branch done in a method to make sure that
-  // there are no branches over calls to super() or this() from inside of
-  // a constructor.
-  int32_t furthest_jump() { return _furthest_jump; }
-
-  void set_furthest_jump(int32_t target) {
-    _furthest_jump = target;
-  }
-
-  void update_furthest_jump(int32_t target) {
-    if (target > _furthest_jump) _furthest_jump = target;
-  }
-
 };
 
 inline int ClassVerifier::change_sig_to_verificationType(
--- a/hotspot/src/share/vm/code/dependencies.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/code/dependencies.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -407,56 +407,66 @@
 // for the sake of the compiler log, print out current dependencies:
 void Dependencies::log_all_dependencies() {
   if (log() == NULL)  return;
-  ciBaseObject* args[max_arg_count];
+  ResourceMark rm;
   for (int deptv = (int)FIRST_TYPE; deptv < (int)TYPE_LIMIT; deptv++) {
     DepType dept = (DepType)deptv;
     GrowableArray<ciBaseObject*>* deps = _deps[dept];
-    if (deps->length() == 0)  continue;
+    int deplen = deps->length();
+    if (deplen == 0) {
+      continue;
+    }
     int stride = dep_args(dept);
+    GrowableArray<ciBaseObject*>* ciargs = new GrowableArray<ciBaseObject*>(stride);
     for (int i = 0; i < deps->length(); i += stride) {
       for (int j = 0; j < stride; j++) {
         // flush out the identities before printing
-        args[j] = deps->at(i+j);
+        ciargs->push(deps->at(i+j));
       }
-      write_dependency_to(log(), dept, stride, args);
+      write_dependency_to(log(), dept, ciargs);
+      ciargs->clear();
     }
+    guarantee(deplen == deps->length(), "deps array cannot grow inside nested ResourceMark scope");
   }
 }
 
 void Dependencies::write_dependency_to(CompileLog* log,
                                        DepType dept,
-                                       int nargs, DepArgument args[],
+                                       GrowableArray<DepArgument>* args,
                                        Klass* witness) {
   if (log == NULL) {
     return;
   }
+  ResourceMark rm;
   ciEnv* env = ciEnv::current();
-  ciBaseObject* ciargs[max_arg_count];
-  assert(nargs <= max_arg_count, "oob");
-  for (int j = 0; j < nargs; j++) {
-    if (args[j].is_oop()) {
-      ciargs[j] = env->get_object(args[j].oop_value());
+  GrowableArray<ciBaseObject*>* ciargs = new GrowableArray<ciBaseObject*>(args->length());
+  for (GrowableArrayIterator<DepArgument> it = args->begin(); it != args->end(); ++it) {
+    DepArgument arg = *it;
+    if (arg.is_oop()) {
+      ciargs->push(env->get_object(arg.oop_value()));
     } else {
-      ciargs[j] = env->get_metadata(args[j].metadata_value());
+      ciargs->push(env->get_metadata(arg.metadata_value()));
     }
   }
-  Dependencies::write_dependency_to(log, dept, nargs, ciargs, witness);
+  int argslen = ciargs->length();
+  Dependencies::write_dependency_to(log, dept, ciargs, witness);
+  guarantee(argslen == ciargs->length(), "ciargs array cannot grow inside nested ResourceMark scope");
 }
 
 void Dependencies::write_dependency_to(CompileLog* log,
                                        DepType dept,
-                                       int nargs, ciBaseObject* args[],
+                                       GrowableArray<ciBaseObject*>* args,
                                        Klass* witness) {
-  if (log == NULL)  return;
-  assert(nargs <= max_arg_count, "oob");
-  int argids[max_arg_count];
-  int ctxkj = dep_context_arg(dept);  // -1 if no context arg
-  int j;
-  for (j = 0; j < nargs; j++) {
-    if (args[j]->is_object()) {
-      argids[j] = log->identify(args[j]->as_object());
+  if (log == NULL) {
+    return;
+  }
+  ResourceMark rm;
+  GrowableArray<int>* argids = new GrowableArray<int>(args->length());
+  for (GrowableArrayIterator<ciBaseObject*> it = args->begin(); it != args->end(); ++it) {
+    ciBaseObject* obj = *it;
+    if (obj->is_object()) {
+      argids->push(log->identify(obj->as_object()));
     } else {
-      argids[j] = log->identify(args[j]->as_metadata());
+      argids->push(log->identify(obj->as_metadata()));
     }
   }
   if (witness != NULL) {
@@ -465,16 +475,17 @@
     log->begin_elem("dependency");
   }
   log->print(" type='%s'", dep_name(dept));
-  if (ctxkj >= 0) {
-    log->print(" ctxk='%d'", argids[ctxkj]);
+  const int ctxkj = dep_context_arg(dept);  // -1 if no context arg
+  if (ctxkj >= 0 && ctxkj < argids->length()) {
+    log->print(" ctxk='%d'", argids->at(ctxkj));
   }
   // write remaining arguments, if any.
-  for (j = 0; j < nargs; j++) {
+  for (int j = 0; j < argids->length(); j++) {
     if (j == ctxkj)  continue;  // already logged
     if (j == 1) {
-      log->print(  " x='%d'",    argids[j]);
+      log->print(  " x='%d'",    argids->at(j));
     } else {
-      log->print(" x%d='%d'", j, argids[j]);
+      log->print(" x%d='%d'", j, argids->at(j));
     }
   }
   if (witness != NULL) {
@@ -486,9 +497,12 @@
 
 void Dependencies::write_dependency_to(xmlStream* xtty,
                                        DepType dept,
-                                       int nargs, DepArgument args[],
+                                       GrowableArray<DepArgument>* args,
                                        Klass* witness) {
-  if (xtty == NULL)  return;
+  if (xtty == NULL) {
+    return;
+  }
+  ResourceMark rm;
   ttyLocker ttyl;
   int ctxkj = dep_context_arg(dept);  // -1 if no context arg
   if (witness != NULL) {
@@ -498,23 +512,24 @@
   }
   xtty->print(" type='%s'", dep_name(dept));
   if (ctxkj >= 0) {
-    xtty->object("ctxk", args[ctxkj].metadata_value());
+    xtty->object("ctxk", args->at(ctxkj).metadata_value());
   }
   // write remaining arguments, if any.
-  for (int j = 0; j < nargs; j++) {
+  for (int j = 0; j < args->length(); j++) {
     if (j == ctxkj)  continue;  // already logged
+    DepArgument arg = args->at(j);
     if (j == 1) {
-      if (args[j].is_oop()) {
-        xtty->object("x", args[j].oop_value());
+      if (arg.is_oop()) {
+        xtty->object("x", arg.oop_value());
       } else {
-        xtty->object("x", args[j].metadata_value());
+        xtty->object("x", arg.metadata_value());
       }
     } else {
       char xn[10]; sprintf(xn, "x%d", j);
-      if (args[j].is_oop()) {
-        xtty->object(xn, args[j].oop_value());
+      if (arg.is_oop()) {
+        xtty->object(xn, arg.oop_value());
       } else {
-        xtty->object(xn, args[j].metadata_value());
+        xtty->object(xn, arg.metadata_value());
       }
     }
   }
@@ -525,7 +540,7 @@
   xtty->end_elem();
 }
 
-void Dependencies::print_dependency(DepType dept, int nargs, DepArgument args[],
+void Dependencies::print_dependency(DepType dept, GrowableArray<DepArgument>* args,
                                     Klass* witness) {
   ResourceMark rm;
   ttyLocker ttyl;   // keep the following output all in one block
@@ -534,8 +549,8 @@
                 dep_name(dept));
   // print arguments
   int ctxkj = dep_context_arg(dept);  // -1 if no context arg
-  for (int j = 0; j < nargs; j++) {
-    DepArgument arg = args[j];
+  for (int j = 0; j < args->length(); j++) {
+    DepArgument arg = args->at(j);
     bool put_star = false;
     if (arg.is_null())  continue;
     const char* what;
@@ -571,31 +586,33 @@
 void Dependencies::DepStream::log_dependency(Klass* witness) {
   if (_deps == NULL && xtty == NULL)  return;  // fast cutout for runtime
   ResourceMark rm;
-  int nargs = argument_count();
-  DepArgument args[max_arg_count];
+  const int nargs = argument_count();
+  GrowableArray<DepArgument>* args = new GrowableArray<DepArgument>(nargs);
   for (int j = 0; j < nargs; j++) {
     if (type() == call_site_target_value) {
-      args[j] = argument_oop(j);
+      args->push(argument_oop(j));
     } else {
-      args[j] = argument(j);
+      args->push(argument(j));
     }
   }
+  int argslen = args->length();
   if (_deps != NULL && _deps->log() != NULL) {
-    Dependencies::write_dependency_to(_deps->log(),
-                                      type(), nargs, args, witness);
+    Dependencies::write_dependency_to(_deps->log(), type(), args, witness);
   } else {
-    Dependencies::write_dependency_to(xtty,
-                                      type(), nargs, args, witness);
+    Dependencies::write_dependency_to(xtty, type(), args, witness);
   }
+  guarantee(argslen == args->length(), "args array cannot grow inside nested ResourceMark scope");
 }
 
 void Dependencies::DepStream::print_dependency(Klass* witness, bool verbose) {
+  ResourceMark rm;
   int nargs = argument_count();
-  DepArgument args[max_arg_count];
+  GrowableArray<DepArgument>* args = new GrowableArray<DepArgument>(nargs);
   for (int j = 0; j < nargs; j++) {
-    args[j] = argument(j);
+    args->push(argument(j));
   }
-  Dependencies::print_dependency(type(), nargs, args, witness);
+  int argslen = args->length();
+  Dependencies::print_dependency(type(), args, witness);
   if (verbose) {
     if (_code != NULL) {
       tty->print("  code: ");
@@ -603,6 +620,7 @@
       tty->cr();
     }
   }
+  guarantee(argslen == args->length(), "args array cannot grow inside nested ResourceMark scope");
 }
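
The guarantees added throughout this file all defend the same invariant. The DepArgument and ciBaseObject arrays are now resource-allocated GrowableArrays, and a nested ResourceMark releases everything allocated after it was created; if a callee grew such an array inside that scope, the reallocated backing store would be freed on scope exit and the caller would hold a dangling buffer. A toy model of the mechanism, with simplified stand-ins for the arena and mark (none of this is the HotSpot resource area):

#include <cassert>
#include <cstring>

struct Arena {                     // toy bump arena with rollback
  char buf[1 << 16];
  size_t top;
  Arena() : top(0) {}
  void* alloc(size_t n) { void* p = buf + top; top += n; return p; }
};

struct ResourceMark {              // releases later allocations on destruction
  Arena& arena;
  size_t saved;
  explicit ResourceMark(Arena& a) : arena(a), saved(a.top) {}
  ~ResourceMark() { arena.top = saved; }
};

// A buggy callee: growing a caller-owned resource array inside its own mark
// leaves the caller with storage that is released when the mark unwinds.
static int* grow_in_nested_scope(Arena& arena, int* data, size_t old_len) {
  ResourceMark rm(arena);
  int* bigger = (int*)arena.alloc(2 * old_len * sizeof(int));
  memcpy(bigger, data, old_len * sizeof(int));
  return bigger;                   // dangles as soon as rm destructs!
}

int main() {
  Arena arena;
  size_t len = 4;
  int* args = (int*)arena.alloc(len * sizeof(int));
  for (size_t i = 0; i < len; i++) args[i] = (int)i;

  size_t len_before = len;         // capture, like 'argslen' above
  int* grown = grow_in_nested_scope(arena, args, len);
  // 'grown' already points at released arena space here; the re-check
  // pattern in the guarantees above exists to catch exactly this kind
  // of growth inside a nested scope.
  (void)grown;
  assert(len_before == len);       // the guarantee-style re-check
  return 0;
}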
 
 
--- a/hotspot/src/share/vm/code/dependencies.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/code/dependencies.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -369,20 +369,36 @@
   void copy_to(nmethod* nm);
 
   void log_all_dependencies();
-  void log_dependency(DepType dept, int nargs, ciBaseObject* args[]) {
-    write_dependency_to(log(), dept, nargs, args);
+
+  void log_dependency(DepType dept, GrowableArray<ciBaseObject*>* args) {
+    ResourceMark rm;
+    int argslen = args->length();
+    write_dependency_to(log(), dept, args);
+    guarantee(argslen == args->length(),
+              "args array cannot grow inside nested ResourceMark scope");
   }
+
   void log_dependency(DepType dept,
                       ciBaseObject* x0,
                       ciBaseObject* x1 = NULL,
                       ciBaseObject* x2 = NULL) {
-    if (log() == NULL)  return;
-    ciBaseObject* args[max_arg_count];
-    args[0] = x0;
-    args[1] = x1;
-    args[2] = x2;
-    assert(2 < max_arg_count, "");
-    log_dependency(dept, dep_args(dept), args);
+    if (log() == NULL) {
+      return;
+    }
+    ResourceMark rm;
+    GrowableArray<ciBaseObject*>* ciargs =
+                new GrowableArray<ciBaseObject*>(dep_args(dept));
+    assert(x0 != NULL, "no log x0");
+    ciargs->push(x0);
+
+    if (x1 != NULL) {
+      ciargs->push(x1);
+    }
+    if (x2 != NULL) {
+      ciargs->push(x2);
+    }
+    assert(ciargs->length() == dep_args(dept), "");
+    log_dependency(dept, ciargs);
   }
 
   class DepArgument : public ResourceObj {
@@ -405,20 +421,8 @@
     Metadata* metadata_value() const { assert(!_is_oop && _valid, "must be"); return (Metadata*) _value; }
   };
 
-  static void write_dependency_to(CompileLog* log,
-                                  DepType dept,
-                                  int nargs, ciBaseObject* args[],
-                                  Klass* witness = NULL);
-  static void write_dependency_to(CompileLog* log,
-                                  DepType dept,
-                                  int nargs, DepArgument args[],
-                                  Klass* witness = NULL);
-  static void write_dependency_to(xmlStream* xtty,
-                                  DepType dept,
-                                  int nargs, DepArgument args[],
-                                  Klass* witness = NULL);
   static void print_dependency(DepType dept,
-                               int nargs, DepArgument args[],
+                               GrowableArray<DepArgument>* args,
                                Klass* witness = NULL);
 
  private:
@@ -427,6 +431,18 @@
 
   static Klass* ctxk_encoded_as_null(DepType dept, Metadata* x);
 
+  static void write_dependency_to(CompileLog* log,
+                                  DepType dept,
+                                  GrowableArray<ciBaseObject*>* args,
+                                  Klass* witness = NULL);
+  static void write_dependency_to(CompileLog* log,
+                                  DepType dept,
+                                  GrowableArray<DepArgument>* args,
+                                  Klass* witness = NULL);
+  static void write_dependency_to(xmlStream* xtty,
+                                  DepType dept,
+                                  GrowableArray<DepArgument>* args,
+                                  Klass* witness = NULL);
  public:
   // Use this to iterate over an nmethod's dependency set.
   // Works on new and old dependency sets.
--- a/hotspot/src/share/vm/compiler/compilerOracle.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/compiler/compilerOracle.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -33,6 +33,7 @@
 #include "oops/symbol.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.hpp"
+#include "runtime/os.hpp"
 
 class MethodMatcher : public CHeapObj<mtCompiler> {
  public:
@@ -175,7 +176,11 @@
                              Symbol* method_name, Mode method_mode,
                              Symbol* signature, const char * opt, MethodMatcher* next):
     MethodMatcher(class_name, class_mode, method_name, method_mode, signature, next) {
-    option = opt;
+    option = os::strdup_check_oom(opt);
+  }
+
+  virtual ~MethodOptionMatcher() {
+    os::free((void*)option);
   }
 
   bool match(methodHandle method, const char* opt) {
@@ -498,7 +503,7 @@
           tty->print("CompilerOracle: %s ", command_names[command]);
           match->print();
         }
-        match = add_option_string(c_name, c_match, m_name, m_match, signature, strdup(option));
+        match = add_option_string(c_name, c_match, m_name, m_match, signature, option);
         line += bytes_read;
       }
     } else {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,8 @@
 }
 
 void ConcurrentMarkSweepPolicy::initialize_generations() {
-  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
+  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC,
+    CURRENT_PC, AllocFailStrategy::RETURN_NULL);
   if (_generations == NULL)
     vm_exit_during_initialization("Unable to allocate gen spec");
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -5987,6 +5987,8 @@
 };
 
 void CMSRefProcTaskProxy::work(uint worker_id) {
+  ResourceMark rm;
+  HandleMark hm;
   assert(_collector->_span.equals(_span), "Inconsistency in _span");
   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
                                         _mark_bit_map,
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -2167,7 +2167,9 @@
   g1h->increment_total_collections();
 
   // Clean out dead classes and update Metaspace sizes.
-  ClassLoaderDataGraph::purge();
+  if (ClassUnloadingWithConcurrentMark) {
+    ClassLoaderDataGraph::purge();
+  }
   MetaspaceGC::compute_new_size();
 
   // We reclaimed old regions so we should calculate the sizes to make
@@ -2403,6 +2405,8 @@
   }
 
   virtual void work(uint worker_id) {
+    ResourceMark rm;
+    HandleMark hm;
     CMTask* task = _cm->task(worker_id);
     G1CMIsAliveClosure g1_is_alive(_g1h);
     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
@@ -2595,24 +2599,27 @@
   assert(_markStack.isEmpty(), "Marking should have completed");
 
   // Unload Klasses, String, Symbols, Code Cache, etc.
-
-  G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
-
-  bool purged_classes;
-
   {
-    G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
-    purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
-  }
-
-  {
-    G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
-    weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
-  }
-
-  if (G1StringDedup::is_enabled()) {
-    G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
-    G1StringDedup::unlink(&g1_is_alive);
+    G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
+
+    if (ClassUnloadingWithConcurrentMark) {
+      bool purged_classes;
+
+      {
+        G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
+        purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
+      }
+
+      {
+        G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
+        weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
+      }
+    }
+
+    if (G1StringDedup::is_enabled()) {
+      G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
+      G1StringDedup::unlink(&g1_is_alive);
+    }
   }
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1926,6 +1926,8 @@
   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
+  _humongous_is_live(),
+  _has_humongous_reclaim_candidates(false),
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
@@ -2082,6 +2084,7 @@
   _g1h = this;
 
   _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
+  _humongous_is_live.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
 
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
@@ -2177,6 +2180,11 @@
   }
 }
 
+void G1CollectedHeap::clear_humongous_is_live_table() {
+  guarantee(G1ReclaimDeadHumongousObjectsAtYoungGC, "Should only be called if true");
+  _humongous_is_live.clear();
+}
+
 size_t G1CollectedHeap::conservative_max_heap_alignment() {
   return HeapRegion::max_region_size();
 }
@@ -2574,15 +2582,12 @@
 
 // Iteration functions.
 
-// Iterates an OopClosure over all ref-containing fields of objects
-// within a HeapRegion.
+// Applies an ExtendedOopClosure to all references of objects within a HeapRegion.
 
 class IterateOopClosureRegionClosure: public HeapRegionClosure {
-  MemRegion _mr;
   ExtendedOopClosure* _cl;
 public:
-  IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
-    : _mr(mr), _cl(cl) {}
+  IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
   bool doHeapRegion(HeapRegion* r) {
     if (!r->continuesHumongous()) {
       r->oop_iterate(_cl);
@@ -2592,12 +2597,7 @@
 };
 
 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
-  IterateOopClosureRegionClosure blk(_g1_committed, cl);
-  heap_region_iterate(&blk);
-}
-
-void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-  IterateOopClosureRegionClosure blk(mr, cl);
+  IterateOopClosureRegionClosure blk(cl);
   heap_region_iterate(&blk);
 }
 
@@ -3771,6 +3771,61 @@
   return g1_rem_set()->cardsScanned();
 }
 
+bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
+  HeapRegion* region = region_at(index);
+  assert(region->startsHumongous(), "Must start a humongous object");
+  return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
+}
+
+class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
+ private:
+  size_t _total_humongous;
+  size_t _candidate_humongous;
+ public:
+  RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    if (!r->startsHumongous()) {
+      return false;
+    }
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    uint region_idx = r->hrs_index();
+    bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
+    // is_candidate already filters out humongous regions with some remembered set.
+    // This will not lead to a humongous object being mistakenly kept alive, because
+    // during a young collection remembered sets are only added to.
+    if (is_candidate) {
+      g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
+      _candidate_humongous++;
+    }
+    _total_humongous++;
+
+    return false;
+  }
+
+  size_t total_humongous() const { return _total_humongous; }
+  size_t candidate_humongous() const { return _candidate_humongous; }
+};
+
+void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
+  if (!G1ReclaimDeadHumongousObjectsAtYoungGC) {
+    g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0, 0);
+    return;
+  }
+
+  RegisterHumongousWithInCSetFastTestClosure cl;
+  heap_region_iterate(&cl);
+  g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(cl.total_humongous(),
+                                                                  cl.candidate_humongous());
+  _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
+
+  if (_has_humongous_reclaim_candidates) {
+    clear_humongous_is_live_table();
+  }
+}
+
 void
 G1CollectedHeap::setup_surviving_young_words() {
   assert(_surviving_young_words == NULL, "pre-condition");
@@ -4058,6 +4113,8 @@
 
         g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
 
+        register_humongous_regions_with_in_cset_fast_test();
+
         _cm->note_start_of_gc();
         // We should not verify the per-thread SATB buffers given that
         // we have not filtered them yet (we'll do so during the
@@ -4108,6 +4165,9 @@
                                  true  /* verify_fingers */);
 
         free_collection_set(g1_policy()->collection_set(), evacuation_info);
+
+        eagerly_reclaim_humongous_regions();
+
         g1_policy()->clear_collection_set();
 
         cleanup_surviving_young_words();
@@ -4608,7 +4668,9 @@
 
   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
 
-  if (_g1->in_cset_fast_test(obj)) {
+  G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
+
+  if (state == G1CollectedHeap::InCSet) {
     oop forwardee;
     if (obj->is_forwarded()) {
       forwardee = obj->forwardee();
@@ -4627,6 +4689,9 @@
       do_klass_barrier(p, forwardee);
     }
   } else {
+    if (state == G1CollectedHeap::IsHumongous) {
+      _g1->set_humongous_is_live(obj);
+    }
     // The object is not in collection set. If we're a root scanning
     // closure during an initial mark pause then attempt to mark the object.
     if (do_mark_object == G1MarkFromRoot) {
@@ -4719,11 +4784,6 @@
   Mutex _stats_lock;
   Mutex* stats_lock() { return &_stats_lock; }
 
-  size_t getNCards() {
-    return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
-      / G1BlockOffsetSharedArray::N_bytes;
-  }
-
 public:
   G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
     : AbstractGangTask("G1 collection"),
@@ -4847,10 +4907,15 @@
       if (_g1h->g1_policy()->during_initial_mark_pause()) {
         // We also need to mark copied objects.
         strong_root_cl = &scan_mark_root_cl;
-        weak_root_cl   = &scan_mark_weak_root_cl;
         strong_cld_cl  = &scan_mark_cld_cl;
-        weak_cld_cl    = &scan_mark_weak_cld_cl;
         strong_code_cl = &scan_mark_code_cl;
+        if (ClassUnloadingWithConcurrentMark) {
+          weak_root_cl = &scan_mark_weak_root_cl;
+          weak_cld_cl  = &scan_mark_weak_cld_cl;
+        } else {
+          weak_root_cl = &scan_mark_root_cl;
+          weak_cld_cl  = &scan_mark_cld_cl;
+        }
       } else {
         strong_root_cl = &scan_only_root_cl;
         weak_root_cl   = &scan_only_root_cl;
@@ -4921,6 +4986,7 @@
   double closure_app_time_sec = 0.0;
 
   bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
+  bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
 
   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
   BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
@@ -4930,8 +4996,8 @@
                 &buf_scan_non_heap_roots,
                 &buf_scan_non_heap_weak_roots,
                 scan_strong_clds,
-                // Initial Mark handles the weak CLDs separately.
-                (during_im ? NULL : scan_weak_clds),
+                // Initial-mark pauses that unload classes handle the weak CLDs separately.
+                (trace_metadata ? NULL : scan_weak_clds),
                 scan_strong_code);
 
   // Now the CM ref_processor roots.
@@ -4943,7 +5009,7 @@
     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
   }
 
-  if (during_im) {
+  if (trace_metadata) {
     // Barrier to make sure all workers passed
     // the strong CLD and strong nmethods phases.
     active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
@@ -5450,12 +5516,21 @@
 public:
   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
-  void do_oop(      oop* p) {
+  void do_oop(oop* p) {
     oop obj = *p;
 
-    if (_g1->obj_in_cs(obj)) {
+    G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
+    if (obj == NULL || cset_state == G1CollectedHeap::InNeither) {
+      return;
+    }
+    if (cset_state == G1CollectedHeap::InCSet) {
       assert( obj->is_forwarded(), "invariant" );
       *p = obj->forwardee();
+    } else {
+      assert(!obj->is_forwarded(), "invariant");
+      assert(cset_state == G1CollectedHeap::IsHumongous,
+             err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
+      _g1->set_humongous_is_live(obj);
     }
   }
 };
@@ -5485,7 +5560,7 @@
   template <class T> void do_oop_work(T* p) {
     oop obj = oopDesc::load_decode_heap_oop(p);
 
-    if (_g1h->obj_in_cs(obj)) {
+    if (_g1h->is_in_cset_or_humongous(obj)) {
       // If the referent object has been forwarded (either copied
       // to a new location or to itself in the event of an
       // evacuation failure) then we need to update the reference
@@ -5510,10 +5585,10 @@
         assert(!Metaspace::contains((const void*)p),
                err_msg("Unexpectedly found a pointer from metadata: "
                               PTR_FORMAT, p));
-          _copy_non_heap_obj_cl->do_oop(p);
-        }
+        _copy_non_heap_obj_cl->do_oop(p);
       }
     }
+  }
 };
 
 // Serial drain queue closure. Called as the 'complete_gc'
@@ -6435,6 +6510,154 @@
   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
 }
 
+class G1FreeHumongousRegionClosure : public HeapRegionClosure {
+ private:
+  FreeRegionList* _free_region_list;
+  HeapRegionSet* _proxy_set;
+  HeapRegionSetCount _humongous_regions_removed;
+  size_t _freed_bytes;
+ public:
+
+  G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
+    _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    if (!r->startsHumongous()) {
+      return false;
+    }
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    oop obj = (oop)r->bottom();
+    CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
+
+    // The following checks on whether the humongous object is live are sufficient.
+    // The main additional check (in addition to having a reference from the roots
+    // or the young gen) is whether the humongous object has a remembered set entry.
+    //
+    // A humongous object cannot be live if there is no remembered set for it
+    // because:
+    // - there can be no references from within humongous starts regions referencing
+    // the object because we never allocate other objects into them.
+    // (I.e. there are no intra-region references that may be missed by the
+    // remembered set)
+    // - as soon as there is a remembered set entry to the humongous starts region
+    // (i.e. it has "escaped" to an old object) this remembered set entry will stay
+    // until the end of a concurrent mark.
+    //
+    // It is not required to check whether the object has been found dead by marking
+    // or not, in fact it would prevent reclamation within a concurrent cycle, as
+    // all objects allocated during that time are considered live.
+    // SATB marking is even more conservative than the remembered set.
+    // So if at this point in the collection there is no remembered set entry,
+    // nobody has a reference to it.
+    // At the start of collection we flush all refinement logs, and remembered sets
+    // are completely up-to-date with respect to references to the humongous object.
+    //
+    // Other implementation considerations:
+    // - never consider object arrays: while they are a valid target, they have not
+    // been observed to be used as temporary objects.
+    // - they would also require considerable effort to clean up the remembered
+    // sets.
+    // While this cleanup is not strictly necessary (nor needed instantly),
+    // their occurrence is very low, so excluding them saves us this
+    // additional complexity.
+    uint region_idx = r->hrs_index();
+    if (g1h->humongous_is_live(region_idx) ||
+        g1h->humongous_region_is_always_live(region_idx)) {
+
+      if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
+        gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
+                               r->isHumongous(),
+                               region_idx,
+                               r->rem_set()->occupied(),
+                               r->rem_set()->strong_code_roots_list_length(),
+                               next_bitmap->isMarked(r->bottom()),
+                               g1h->humongous_is_live(region_idx),
+                               obj->is_objArray()
+                              );
+      }
+
+      return false;
+    }
+
+    guarantee(!obj->is_objArray(),
+              err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
+                      r->bottom()));
+
+    if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
+      gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
+                             r->isHumongous(),
+                             r->bottom(),
+                             region_idx,
+                             r->region_num(),
+                             r->rem_set()->occupied(),
+                             r->rem_set()->strong_code_roots_list_length(),
+                             next_bitmap->isMarked(r->bottom()),
+                             g1h->humongous_is_live(region_idx),
+                             obj->is_objArray()
+                            );
+    }
+    // Need to clear mark bit of the humongous object if already set.
+    if (next_bitmap->isMarked(r->bottom())) {
+      next_bitmap->clear(r->bottom());
+    }
+    _freed_bytes += r->used();
+    r->set_containing_set(NULL);
+    _humongous_regions_removed.increment(1u, r->capacity());
+    g1h->free_humongous_region(r, _free_region_list, false);
+
+    return false;
+  }
+
+  HeapRegionSetCount& humongous_free_count() {
+    return _humongous_regions_removed;
+  }
+
+  size_t bytes_freed() const {
+    return _freed_bytes;
+  }
+
+  size_t humongous_reclaimed() const {
+    return _humongous_regions_removed.length();
+  }
+};
+
+void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
+  assert_at_safepoint(true);
+
+  if (!G1ReclaimDeadHumongousObjectsAtYoungGC || !_has_humongous_reclaim_candidates) {
+    g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
+    return;
+  }
+
+  double start_time = os::elapsedTime();
+
+  FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
+
+  G1FreeHumongousRegionClosure cl(&local_cleanup_list);
+  heap_region_iterate(&cl);
+
+  HeapRegionSetCount empty_set;
+  remove_from_old_sets(empty_set, cl.humongous_free_count());
+
+  G1HRPrinter* hr_printer = _g1h->hr_printer();
+  if (hr_printer->is_active()) {
+    FreeRegionListIterator iter(&local_cleanup_list);
+    while (iter.more_available()) {
+      HeapRegion* hr = iter.get_next();
+      hr_printer->cleanup(hr);
+    }
+  }
+
+  prepend_to_freelist(&local_cleanup_list);
+  decrement_summary_bytes(cl.bytes_freed());
+
+  g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
+                                                                    cl.humongous_reclaimed());
+}
+
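The new reclaim pass above follows a two-phase shape that recurs in G1: a region closure gathers reclaimable humongous regions into a thread-local free list while iterating, and the caller splices that list into the global free list in one step afterwards. Below is a minimal sketch of that shape only, using simplified stand-in types (Region, FreeRegionList and eagerly_reclaim are illustrative names, not the HotSpot classes):

#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-ins for HeapRegion and the free list; hypothetical names.
struct Region {
  uint32_t index;
  size_t   used_bytes;
  bool     humongous_start;
  bool     remset_empty;     // stands in for rem_set()->is_empty()
  bool     live_by_roots;    // stands in for humongous_is_live()
};

struct FreeRegionList {
  std::vector<Region*> regions;
  void add(Region* r) { regions.push_back(r); }
  // Splice another list into this one, emptying the source.
  void prepend(FreeRegionList& other) {
    regions.insert(regions.begin(), other.regions.begin(), other.regions.end());
    other.regions.clear();
  }
};

// Closure-style pass: decide per region, accumulate locally.
struct FreeHumongousClosure {
  FreeRegionList local_list;
  size_t freed_bytes = 0;

  // Returns false to continue iteration, mirroring HeapRegionClosure.
  bool do_region(Region* r) {
    if (!r->humongous_start) return false;
    // Conservative liveness test: any remembered set entry or root keeps it.
    if (!r->remset_empty || r->live_by_roots) return false;
    freed_bytes += r->used_bytes;
    local_list.add(r);
    return false;
  }
};

void eagerly_reclaim(std::vector<Region>& heap, FreeRegionList& global_free_list) {
  FreeHumongousClosure cl;
  for (Region& r : heap) {
    if (cl.do_region(&r)) break;
  }
  // A single splice at the end keeps any synchronization on the global list
  // out of the per-region loop.
  global_free_list.prepend(cl.local_list);
}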
 // This routine is similar to the above but does not record
 // any policy statistics or update free lists; we are abandoning
 // the current incremental collection set in preparation of a
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -197,16 +197,6 @@
   bool do_object_b(oop p);
 };
 
-// Instances of this class are used for quick tests on whether a reference points
-// into the collection set. Each of the array's elements denotes whether the
-// corresponding region is in the collection set.
-class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<bool> {
- protected:
-  bool default_value() const { return false; }
- public:
-  void clear() { G1BiasedMappedArray<bool>::clear(); }
-};
-
 class RefineCardTableEntryClosure;
 
 class G1CollectedHeap : public SharedHeap {
@@ -237,6 +227,7 @@
   friend class EvacPopObjClosure;
   friend class G1ParCleanupCTTask;
 
+  friend class G1FreeHumongousRegionClosure;
   // Other related classes.
   friend class G1MarkSweep;
 
@@ -267,6 +258,9 @@
   // It keeps track of the humongous regions.
   HeapRegionSet _humongous_set;
 
+  void clear_humongous_is_live_table();
+  void eagerly_reclaim_humongous_regions();
+
   // The number of regions we could create by expansion.
   uint _expansion_regions;
 
@@ -367,10 +361,25 @@
   // than the current allocation region.
   size_t _summary_bytes_used;
 
-  // This array is used for a quick test on whether a reference points into
-  // the collection set or not. Each of the array's elements denotes whether the
-  // corresponding region is in the collection set or not.
-  G1FastCSetBiasedMappedArray _in_cset_fast_test;
+  // Records whether the region at the given index is kept live by roots or
+  // references from the young generation.
+  class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
+   protected:
+    bool default_value() const { return false; }
+   public:
+    void clear() { G1BiasedMappedArray<bool>::clear(); }
+    void set_live(uint region) {
+      set_by_index(region, true);
+    }
+    bool is_live(uint region) {
+      return get_by_index(region);
+    }
+  };
+
+  HumongousIsLiveBiasedMappedArray _humongous_is_live;
+  // Stores whether during humongous object registration we found candidate regions.
+  // If not, we can skip a few steps.
+  bool _has_humongous_reclaim_candidates;
 
   volatile unsigned _gc_time_stamp;
 
@@ -690,10 +699,24 @@
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
 
+  inline void set_humongous_is_live(oop obj);
+
+  bool humongous_is_live(uint region) {
+    return _humongous_is_live.is_live(region);
+  }
+
+  // Returns whether the given region (which must be a humongous (start) region)
+  // is to be considered conservatively live regardless of any other conditions.
+  bool humongous_region_is_always_live(uint index);
+  // Register the given region to be part of the collection set.
+  inline void register_humongous_region_with_in_cset_fast_test(uint index);
+  // Register regions with humongous objects (actually on the start region) in
+  // the in_cset_fast_test table.
+  void register_humongous_regions_with_in_cset_fast_test();
   // We register a region with the fast "in collection set" test. We
   // simply set to true the array slot corresponding to this region.
   void register_region_with_in_cset_fast_test(HeapRegion* r) {
-    _in_cset_fast_test.set_by_index(r->hrs_index(), true);
+    _in_cset_fast_test.set_in_cset(r->hrs_index());
   }
 
   // This is a fast test on whether a reference points into the
@@ -1283,9 +1306,61 @@
   virtual bool is_in(const void* p) const;
 
   // Return "TRUE" iff the given object address is within the collection
-  // set.
+  // set. Slow implementation.
   inline bool obj_in_cs(oop obj);
 
+  inline bool is_in_cset(oop obj);
+
+  inline bool is_in_cset_or_humongous(const oop obj);
+
+  enum in_cset_state_t {
+   InNeither,           // neither in collection set nor humongous
+   InCSet,              // region is in collection set only
+   IsHumongous          // region is a humongous start region
+  };
+ private:
+  // Instances of this class are used for quick tests on whether a reference points
+  // into the collection set or is a humongous object (points into a humongous
+  // object).
+  // Each of the array's elements denotes whether the corresponding region is in
+  // the collection set or a humongous region.
+  // We use this to quickly reclaim humongous objects: by making a humongous region
+  // succeed this test, we effectively add it to the collection set. When the
+  // reference iteration closures encounter a humongous region, they simply mark it
+  // as referenced, i.e. live.
+  class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> {
+   protected:
+    char default_value() const { return G1CollectedHeap::InNeither; }
+   public:
+    void set_humongous(uintptr_t index) {
+      assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values");
+      set_by_index(index, G1CollectedHeap::IsHumongous);
+    }
+
+    void clear_humongous(uintptr_t index) {
+      set_by_index(index, G1CollectedHeap::InNeither);
+    }
+
+    void set_in_cset(uintptr_t index) {
+      assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value");
+      set_by_index(index, G1CollectedHeap::InCSet);
+    }
+
+    bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; }
+    bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; }
+    G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); }
+    void clear() { G1BiasedMappedArray<char>::clear(); }
+  };
+
+  // This array is used for a quick test on whether a reference points into
+  // the collection set or not. Each of the array's elements denotes whether the
+  // corresponding region is in the collection set or not.
+  G1FastCSetBiasedMappedArray _in_cset_fast_test;
+
+ public:
+
+  inline in_cset_state_t in_cset_state(const oop obj);
+
   // Return "TRUE" iff the given object address is in the reserved
   // region of g1.
   bool is_in_g1_reserved(const void* p) const {
@@ -1320,9 +1395,6 @@
   // "cl.do_oop" on each.
   virtual void oop_iterate(ExtendedOopClosure* cl);
 
-  // Same as above, restricted to a memory region.
-  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
-
   // Iterate over all objects, calling "cl.do_object" on each.
   virtual void object_iterate(ObjectClosure* cl);
 
@@ -1340,6 +1412,10 @@
   // Return the region with the given index. It assumes the index is valid.
   inline HeapRegion* region_at(uint index) const;
 
+  // Calculate the region index of the given address. The given address must be
+  // within the heap.
+  inline uint addr_to_region(HeapWord* addr) const;
+
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
   // overpartition factor, currently 4).  Assumes that this will be called
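The header changes above replace the boolean in-cset array with a per-region state byte, so a single O(1) lookup answers both "in the collection set?" and "humongous candidate?". A rough sketch of such a table follows, assuming a flat array indexed by (addr - heap_base) >> log_region_bytes; the real G1BiasedMappedArray is biased over the heap's reserved range, and the names below are stand-ins:

#include <cassert>
#include <cstdint>
#include <vector>

enum InCSetState : char { InNeither = 0, InCSet = 1, IsHumongous = 2 };

// One byte per region; indexed from a heap address in O(1).
class RegionStateTable {
  const uintptr_t _heap_base;
  const unsigned  _log_region_bytes;   // e.g. 20 for 1 MB regions
  std::vector<char> _states;
 public:
  RegionStateTable(uintptr_t heap_base, unsigned log_region_bytes, size_t num_regions)
    : _heap_base(heap_base), _log_region_bytes(log_region_bytes),
      _states(num_regions, InNeither) {}

  size_t index_for(uintptr_t addr) const {
    return (addr - _heap_base) >> _log_region_bytes;
  }
  void set_in_cset(size_t i) {
    assert(_states[i] != IsHumongous && "should not overwrite IsHumongous");
    _states[i] = InCSet;
  }
  void set_humongous(size_t i) {
    assert(_states[i] != InCSet && "should not overwrite InCSet");
    _states[i] = IsHumongous;
  }
  void clear_humongous(size_t i) { _states[i] = InNeither; }

  bool is_in_cset_or_humongous(uintptr_t addr) const {
    return _states[index_for(addr)] != InNeither;
  }
  InCSetState at(uintptr_t addr) const {
    return (InCSetState)_states[index_for(addr)];
  }
};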
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -40,6 +40,13 @@
 // Return the region with the given index. It assumes the index is valid.
 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }
 
+inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
+  assert(is_in_reserved(addr),
+         err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
+                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
+  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
+}
+
 template <class T>
 inline HeapRegion*
 G1CollectedHeap::heap_region_containing_raw(const T addr) const {
@@ -172,12 +179,11 @@
   return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
 }
 
-
 // This is a fast test on whether a reference points into the
 // collection set or not. Assume that the reference
 // points into the heap.
-inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
-  bool ret = _in_cset_fast_test.get_by_address((HeapWord*)obj);
+inline bool G1CollectedHeap::is_in_cset(oop obj) {
+  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
   // let's make sure the result is consistent with what the slower
   // test returns
   assert( ret || !obj_in_cs(obj), "sanity");
@@ -185,6 +191,18 @@
   return ret;
 }
 
+bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
+  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
+}
+
+G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
+  return _in_cset_fast_test.at((HeapWord*)obj);
+}
+
+void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) {
+  _in_cset_fast_test.set_humongous(index);
+}
+
 #ifndef PRODUCT
 // Support for G1EvacuationFailureALot
 
@@ -288,4 +306,22 @@
   return is_obj_ill(obj, heap_region_containing(obj));
 }
 
+inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
+  uint region = addr_to_region((HeapWord*)obj);
+  // We not only set the "live" flag in the humongous_is_live table, but also
+  // reset the entry in the _in_cset_fast_test table so that subsequent references
+  // to the same humongous object do not go into the slow path again.
+  // This is racy, as multiple threads may at the same time enter here, but this
+  // is benign.
+  // During collection we only ever set the "live" flag, and only ever clear the
+  // entry in the _in_cset_fast_test table.
+  // We only ever evaluate the contents of these tables (in the VM thread) after
+  // having synchronized the worker threads with the VM thread, or in the same
+  // thread (i.e. within the VM thread).
+  if (!_humongous_is_live.is_live(region)) {
+    _humongous_is_live.set_live(region);
+    _in_cset_fast_test.clear_humongous(region);
+  }
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
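As a worked example of the addr_to_region() arithmetic above, assuming 1 MB regions (LogOfHRGrainBytes == 20) and made-up addresses:

#include <cstdint>
#include <cstdio>

int main() {
  const unsigned log_region_bytes = 20;              // 1 MB regions (illustrative)
  const uintptr_t heap_base = 0x100000000ULL;        // hypothetical reserved base
  const uintptr_t addr      = heap_base + 0x2534000; // some object address

  // Same computation as G1CollectedHeap::addr_to_region(): byte offset from
  // the reserved base, shifted down by the region-size log.
  unsigned region = (unsigned)((addr - heap_base) >> log_region_bytes);
  printf("region index = %u\n", region);             // 0x2534000 >> 20 == 37
  return 0;
}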
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -237,8 +237,10 @@
   _last_gc_worker_times_ms.verify();
   _last_gc_worker_other_times_ms.verify();
 
-  _last_redirty_logged_cards_time_ms.verify();
-  _last_redirty_logged_cards_processed_cards.verify();
+  if (G1DeferredRSUpdate) {
+    _last_redirty_logged_cards_time_ms.verify();
+    _last_redirty_logged_cards_processed_cards.verify();
+  }
 }
 
 void G1GCPhaseTimes::note_string_dedup_fixup_start() {
@@ -255,6 +257,10 @@
   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
 }
 
+void G1GCPhaseTimes::print_stats(int level, const char* str, size_t value) {
+  LineBuffer(level).append_and_print_cr("[%s: "SIZE_FORMAT"]", str, value);
+}
+
 void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %u]", str, value, workers);
 }
@@ -357,6 +363,14 @@
       _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
     }
   }
+  if (G1ReclaimDeadHumongousObjectsAtYoungGC) {
+    print_stats(2, "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
+    if (G1Log::finest()) {
+      print_stats(3, "Humongous Total", _cur_fast_reclaim_humongous_total);
+      print_stats(3, "Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
+      print_stats(3, "Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
+    }
+  }
   print_stats(2, "Free CSet",
     (_recorded_young_free_cset_time_ms +
     _recorded_non_young_free_cset_time_ms));
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -157,11 +157,17 @@
   double _recorded_young_free_cset_time_ms;
   double _recorded_non_young_free_cset_time_ms;
 
+  double _cur_fast_reclaim_humongous_time_ms;
+  size_t _cur_fast_reclaim_humongous_total;
+  size_t _cur_fast_reclaim_humongous_candidates;
+  size_t _cur_fast_reclaim_humongous_reclaimed;
+
   double _cur_verify_before_time_ms;
   double _cur_verify_after_time_ms;
 
   // Helper methods for detailed logging
   void print_stats(int level, const char* str, double value);
+  void print_stats(int level, const char* str, size_t value);
   void print_stats(int level, const char* str, double value, uint workers);
 
  public:
@@ -282,6 +288,16 @@
     _recorded_non_young_free_cset_time_ms = time_ms;
   }
 
+  void record_fast_reclaim_humongous_stats(size_t total, size_t candidates) {
+    _cur_fast_reclaim_humongous_total = total;
+    _cur_fast_reclaim_humongous_candidates = candidates;
+  }
+
+  void record_fast_reclaim_humongous_time_ms(double value, size_t reclaimed) {
+    _cur_fast_reclaim_humongous_time_ms = value;
+    _cur_fast_reclaim_humongous_reclaimed = reclaimed;
+  }
+
   void record_young_cset_choice_time_ms(double time_ms) {
     _recorded_young_cset_choice_time_ms = time_ms;
   }
@@ -348,6 +364,10 @@
     return _recorded_non_young_free_cset_time_ms;
   }
 
+  double fast_reclaim_humongous_time_ms() {
+    return _cur_fast_reclaim_humongous_time_ms;
+  }
+
   double average_last_update_rs_time() {
     return _last_update_rs_times_ms.average();
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -44,7 +44,7 @@
 inline void FilterIntoCSClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop) &&
-      _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) {
+      _g1->is_in_cset_or_humongous(oopDesc::decode_heap_oop_not_null(heap_oop))) {
     _oc->do_oop(p);
   }
 }
@@ -67,7 +67,8 @@
 
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (_g1->in_cset_fast_test(obj)) {
+    G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
+    if (state == G1CollectedHeap::InCSet) {
       // We're not going to even bother checking whether the object is
       // already forwarded or not, as this usually causes an immediate
       // stall. We'll try to prefetch the object (for write, given that
@@ -86,6 +87,9 @@
 
       _par_scan_state->push_on_queue(p);
     } else {
+      if (state == G1CollectedHeap::IsHumongous) {
+        _g1->set_humongous_is_live(obj);
+      }
       _par_scan_state->update_rs(_from, p, _worker_id);
     }
   }
@@ -97,12 +101,14 @@
 
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (_g1->in_cset_fast_test(obj)) {
+    if (_g1->is_in_cset_or_humongous(obj)) {
       Prefetch::write(obj->mark_addr(), 0);
       Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
 
       // Place on the references queue
       _par_scan_state->push_on_queue(p);
+    } else {
+      assert(!_g1->obj_in_cs(obj), "checking");
     }
   }
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -288,7 +288,12 @@
 }
 
 HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
-  HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
+  HeapWord* obj = NULL;
+  if (purpose == GCAllocForSurvived) {
+    obj = alloc_buffer(GCAllocForSurvived)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
+  } else {
+    obj = alloc_buffer(GCAllocForTenured)->allocate(word_sz);
+  }
   if (obj != NULL) {
     return obj;
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -52,15 +52,20 @@
   // set, due to (benign) races in the claim mechanism during RSet scanning more
   // than one thread might claim the same card. So the same card may be
   // processed multiple times. So redo this check.
-  if (_g1h->in_cset_fast_test(obj)) {
+  G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
+  if (in_cset_state == G1CollectedHeap::InCSet) {
     oop forwardee;
     if (obj->is_forwarded()) {
       forwardee = obj->forwardee();
     } else {
       forwardee = copy_to_survivor_space(obj);
     }
-    assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
+  } else if (in_cset_state == G1CollectedHeap::IsHumongous) {
+    _g1h->set_humongous_is_live(obj);
+  } else {
+    assert(in_cset_state == G1CollectedHeap::InNeither,
+           err_msg("In_cset_state must be InNeither here, but is %d", in_cset_state));
   }
 
   assert(obj != NULL, "Must be");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -349,23 +349,8 @@
 
   assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");
 
-  // The two flags below were introduced temporarily to serialize
-  // the updating and scanning of remembered sets. There are some
-  // race conditions when these two operations are done in parallel
-  // and they are causing failures. When we resolve said race
-  // conditions, we'll revert back to parallel remembered set
-  // updating and scanning. See CRs 6677707 and 6677708.
-  if (G1UseParallelRSetUpdating || (worker_i == 0)) {
-    updateRS(&into_cset_dcq, worker_i);
-  } else {
-    _g1p->phase_times()->record_update_rs_processed_buffers(worker_i, 0);
-    _g1p->phase_times()->record_update_rs_time(worker_i, 0.0);
-  }
-  if (G1UseParallelRSetScanning || (worker_i == 0)) {
-    scanRS(oc, code_root_cl, worker_i);
-  } else {
-    _g1p->phase_times()->record_scan_rs_time(worker_i, 0.0);
-  }
+  updateRS(&into_cset_dcq, worker_i);
+  scanRS(oc, code_root_cl, worker_i);
 
   // We now clear the cached values of _cset_rs_update_cl for this worker
   _cset_rs_update_cl[worker_i] = NULL;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -220,14 +220,6 @@
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \
-  experimental(bool, G1UseParallelRSetUpdating, true,                       \
-          "Enables the parallelization of remembered set updating "         \
-          "during evacuation pauses")                                       \
-                                                                            \
-  experimental(bool, G1UseParallelRSetScanning, true,                       \
-          "Enables the parallelization of remembered set scanning "         \
-          "during evacuation pauses")                                       \
-                                                                            \
   product(uintx, G1ConcRefinementThreads, 0,                                \
           "If non-0 is the number of parallel rem set update threads, "     \
           "otherwise the value is determined ergonomically.")               \
@@ -289,6 +281,13 @@
           "The amount of code root chunks that should be kept at most "     \
           "as percentage of already allocated.")                            \
                                                                             \
+  experimental(bool, G1ReclaimDeadHumongousObjectsAtYoungGC, true,          \
+          "Try to reclaim dead large objects at every young GC.")           \
+                                                                            \
+  experimental(bool, G1TraceReclaimDeadHumongousObjectsAtYoungGC, false,    \
+          "Print some information about large object liveness "             \
+          "at every young GC.")                                             \
+                                                                            \
   experimental(uintx, G1OldCSetRegionThresholdPercent, 10,                  \
           "An upper bound for the number of old CSet regions expressed "    \
           "as a percentage of the heap size.")                              \
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -94,26 +94,37 @@
 inline bool
 HeapRegion::block_is_obj(const HeapWord* p) const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  return !g1h->is_obj_dead(oop(p), this);
+  if (ClassUnloadingWithConcurrentMark) {
+    return !g1h->is_obj_dead(oop(p), this);
+  }
+  return p < top();
 }
 
 inline size_t
 HeapRegion::block_size(const HeapWord *addr) const {
+  if (addr == top()) {
+    return pointer_delta(end(), addr);
+  }
+
+  if (block_is_obj(addr)) {
+    return oop(addr)->size();
+  }
+
+  assert(ClassUnloadingWithConcurrentMark,
+      err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
+              "HR: ["PTR_FORMAT", "PTR_FORMAT", "PTR_FORMAT") "
+              "addr: " PTR_FORMAT,
+              p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));
+
   // Old regions' dead objects may have dead classes
   // We need to find the next live object in some other
   // manner than getting the oop size
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  if (g1h->is_obj_dead(oop(addr), this)) {
-    HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
-        getNextMarkedWordAddress(addr, prev_top_at_mark_start());
-
-    assert(next > addr, "must get the next live object");
+  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
+      getNextMarkedWordAddress(addr, prev_top_at_mark_start());
 
-    return pointer_delta(next, addr);
-  } else if (addr == top()) {
-    return pointer_delta(end(), addr);
-  }
-  return oop(addr)->size();
+  assert(next > addr, "must get the next live object");
+  return pointer_delta(next, addr);
 }
 
 inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -289,7 +289,7 @@
   }
 
   _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
-                        mtGC, 0, AllocFailStrategy::RETURN_NULL);
+                        mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
 
   if (_fine_grain_regions == NULL) {
     vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
@@ -695,6 +695,9 @@
   clear_fcc();
 }
 
+bool OtherRegionsTable::is_empty() const {
+  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
+}
 
 size_t OtherRegionsTable::occupied() const {
   size_t sum = occ_fine();
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -185,6 +185,9 @@
   // objects.
   void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
 
+  // Returns whether this remembered set (and all its sub-sets) contains no entries.
+  bool is_empty() const;
+
   size_t occupied() const;
   size_t occ_fine() const;
   size_t occ_coarse() const;
@@ -269,6 +272,10 @@
     return _other_regions.hr();
   }
 
+  bool is_empty() const {
+    return (strong_code_roots_list_length() == 0) && _other_regions.is_empty();
+  }
+
   size_t occupied() {
     MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
     return occupied_locked();
@@ -371,7 +378,7 @@
   void strong_code_roots_do(CodeBlobClosure* blk) const;
 
   // Returns the number of elements in the strong code roots list
-  size_t strong_code_roots_list_length() {
+  size_t strong_code_roots_list_length() const {
     return _code_roots.length();
   }
 
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -28,12 +28,12 @@
 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/shared/ageTable.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "gc_implementation/shared/gcHeapSummary.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcTraceTime.hpp"
-#include "gc_implementation/shared/copyFailedInfo.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.inline.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "memory/defNewGeneration.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
@@ -252,7 +252,7 @@
         plab->set_word_size(buf_size);
         plab->set_buf(buf_space);
         record_survivor_plab(buf_space, buf_size);
-        obj = plab->allocate(word_sz);
+        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
         // Note that we cannot compare buf_size < word_sz below
         // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
         assert(obj != NULL || plab->words_remaining() < word_sz,
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -168,7 +168,7 @@
   HeapWord* alloc_in_to_space_slow(size_t word_sz);
 
   HeapWord* alloc_in_to_space(size_t word_sz) {
-    HeapWord* obj = to_space_alloc_buffer()->allocate(word_sz);
+    HeapWord* obj = to_space_alloc_buffer()->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
     if (obj != NULL) return obj;
     else return alloc_in_to_space_slow(word_sz);
   }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_HPP
 
 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/allocation.hpp"
 
 //
@@ -94,23 +95,9 @@
   PSYoungPromotionLAB() { }
 
   // Not MT safe
-  HeapWord* allocate(size_t size) {
-    // Can't assert this, when young fills, we keep the LAB around, but flushed.
-    // assert(_state != flushed, "Sanity");
-    HeapWord* obj = top();
-    HeapWord* new_top = obj + size;
-    // The 'new_top>obj' check is needed to detect overflow of obj+size.
-    if (new_top > obj && new_top <= end()) {
-      set_top(new_top);
-      assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
-             "checking alignment");
-      return obj;
-    }
+  inline HeapWord* allocate(size_t size);
 
-    return NULL;
-  }
-
-  debug_only(virtual bool lab_is_valid(MemRegion lab));
+  debug_only(virtual bool lab_is_valid(MemRegion lab);)
 };
 
 class PSOldPromotionLAB : public PSPromotionLAB {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_INLINE_HPP
+
+#include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
+
+HeapWord* PSYoungPromotionLAB::allocate(size_t size) {
+  // Can't assert this; when young fills, we keep the LAB around, but flushed.
+  // assert(_state != flushed, "Sanity");
+  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end(), SurvivorAlignmentInBytes);
+  if (obj == NULL) {
+    return NULL;
+  }
+
+  HeapWord* new_top = obj + size;
+  // The 'new_top>obj' check is needed to detect overflow of obj+size.
+  if (new_top > obj && new_top <= end()) {
+    set_top(new_top);
+    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_object_aligned((intptr_t)new_top),
+           "checking alignment");
+    return obj;
+  } else {
+    set_top(obj);
+    return NULL;
+  }
+}
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_INLINE_HPP
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
 
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
+#include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "oops/oop.psgc.inline.hpp"
 
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -24,7 +24,7 @@
 
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
-
+#include "gc_interface/collectedHeap.hpp"
 #include "memory/allocation.hpp"
 #include "memory/blockOffsetTable.hpp"
 #include "memory/threadLocalAllocBuffer.hpp"
@@ -84,6 +84,9 @@
     }
   }
 
+  // Allocate the object aligned to "alignment_in_bytes".
+  HeapWord* allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes);
+
   // Undo the last allocation in the buffer, which is required to be of the
   // "obj" of the given "word_sz".
   void undo_allocation(HeapWord* obj, size_t word_sz) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.inline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP
+
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
+
+HeapWord* ParGCAllocBuffer::allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes) {
+
+  HeapWord* res = CollectedHeap::align_allocation_or_fail(_top, _end, alignment_in_bytes);
+  if (res == NULL) {
+    return NULL;
+  }
+
+  // Set _top so that allocate(), which expects _top to be correctly set,
+  // can be used below.
+  _top = res;
+  return allocate(word_sz);
+}
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP
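allocate_aligned() above first lets align_allocation_or_fail() pad _top up to the requested boundary (installing a filler object over the gap so the heap stays parseable) and then falls through to the ordinary bump-pointer allocate(). A self-contained sketch of that two-step shape, with plain pointers and no filler objects (BumpBuffer is a stand-in, not the ParGCAllocBuffer API):

#include <cstddef>
#include <cstdint>

// Bump-pointer buffer with an aligned allocation path; a simplified model.
struct BumpBuffer {
  char* top;
  char* end;

  void* allocate(size_t bytes) {
    char* obj = top;
    char* new_top = obj + bytes;
    if (new_top > obj && new_top <= end) {   // overflow + capacity check
      top = new_top;
      return obj;
    }
    return nullptr;
  }

  void* allocate_aligned(size_t bytes, uintptr_t alignment) {
    uintptr_t p = (uintptr_t)top;
    uintptr_t aligned = (p + alignment - 1) & ~(alignment - 1);
    if ((char*)aligned > end) return nullptr; // padding alone overflows
    // HotSpot fills the skipped gap with a dummy object; here we simply
    // advance top, mirroring the "_top = res; return allocate(...)" shape.
    top = (char*)aligned;
    return allocate(bytes);
  }
};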
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -195,6 +195,7 @@
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
 }
 
+// Returns true iff concurrent GCs unload metadata.
 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
 #if INCLUDE_ALL_GCS
   if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
@@ -202,7 +203,7 @@
     return true;
   }
 
-  if (UseG1GC) {
+  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
     g1h->g1_policy()->set_initiate_conc_mark_if_possible();
 
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -351,6 +351,12 @@
     fill_with_object(start, pointer_delta(end, start), zap);
   }
 
+  // Return the address "addr" aligned by "alignment_in_bytes" if such
+  // an address is below "end".  Return NULL otherwise.
+  inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
+                                                   HeapWord* end,
+                                                   unsigned short alignment_in_bytes);
+
   // Some heaps may offer a contiguous region for shared non-blocking
   // allocation, via inlined code (by exporting the address of the top and
   // end fields defining the extent of the contiguous allocation region.)
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -241,6 +241,44 @@
   oop_iterate(&no_header_cl);
 }
 
+
+inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
+                                                         HeapWord* end,
+                                                         unsigned short alignment_in_bytes) {
+  if (alignment_in_bytes <= ObjectAlignmentInBytes) {
+    return addr;
+  }
+
+  assert(is_ptr_aligned(addr, HeapWordSize),
+    err_msg("Address " PTR_FORMAT " is not properly aligned.", p2i(addr)));
+  assert(is_size_aligned(alignment_in_bytes, HeapWordSize),
+    err_msg("Alignment size %u is incorrect.", alignment_in_bytes));
+
+  HeapWord* new_addr = (HeapWord*) align_pointer_up(addr, alignment_in_bytes);
+  size_t padding = pointer_delta(new_addr, addr);
+
+  if (padding == 0) {
+    return addr;
+  }
+
+  if (padding < CollectedHeap::min_fill_size()) {
+    padding += alignment_in_bytes / HeapWordSize;
+    assert(padding >= CollectedHeap::min_fill_size(),
+      err_msg("alignment_in_bytes %u is expect to be larger "
+      "than the minimum object size", alignment_in_bytes));
+    new_addr = addr + padding;
+  }
+
+  assert(new_addr > addr, err_msg("Unexpected arithmetic overflow "
+    PTR_FORMAT " not greater than " PTR_FORMAT, p2i(new_addr), p2i(addr)));
+  if (new_addr < end) {
+    CollectedHeap::fill_with_object(addr, padding);
+    return new_addr;
+  } else {
+    return NULL;
+  }
+}
+
 #ifndef PRODUCT
 
 inline bool
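To make the padding logic above concrete, here is the arithmetic for one illustrative case (8-byte heap words, a 2-word minimum filler, 64-byte alignment; all values made up):

#include <cstdint>
#include <cstdio>

int main() {
  const size_t heap_word = 8;       // HeapWordSize in bytes
  const size_t min_fill  = 2;       // min_fill_size() in words (illustrative)
  const size_t align     = 64;      // SurvivorAlignmentInBytes (illustrative)

  uintptr_t addr     = 0x10000038;  // 8-byte aligned, 56 bytes into a block
  uintptr_t new_addr = (addr + align - 1) & ~(uintptr_t)(align - 1);
  size_t padding     = (new_addr - addr) / heap_word;    // 1 word

  if (padding != 0 && padding < min_fill) {
    // Gap too small to hold a filler object: skip one more alignment unit.
    padding += align / heap_word;                        // 1 + 8 == 9 words
    new_addr = addr + padding * heap_word;               // still 64-byte aligned
  }
  printf("padding = %zu words, new_addr = %#lx\n",
         padding, (unsigned long)new_addr);              // 9 words, 0x10000080
  return 0;
}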
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -280,9 +280,6 @@
   address generate_result_handler_for(BasicType type);
   address generate_slow_signature_handler();
 
-  // entry point generator
-  address generate_method_entry(AbstractInterpreter::MethodKind kind);
-
   void bang_stack_shadow_pages(bool native_call);
 
   void generate_all();
--- a/hotspot/src/share/vm/interpreter/cppInterpreter.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/cppInterpreter.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,7 +108,7 @@
   }
 
 
-#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind)
+#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = ((InterpreterGenerator*)this)->generate_method_entry(Interpreter::kind)
 
   { CodeletMark cm(_masm, "(kind = frame_manager)");
     // all non-native method kinds
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -29,6 +29,7 @@
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "interpreter/templateTable.hpp"
@@ -261,7 +262,7 @@
   // Special intrinsic method?
   // Note: This test must come _after_ the test for native methods,
   //       otherwise we will run into problems with JDK 1.2, see also
-  //       AbstractInterpreterGenerator::generate_method_entry() for
+  //       InterpreterGenerator::generate_method_entry() for
   //       for details.
   switch (m->intrinsic_id()) {
     case vmIntrinsics::_dsin  : return java_lang_math_sin  ;
@@ -521,3 +522,50 @@
     Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
   }
 }
+
+// Generate method entries
+address InterpreterGenerator::generate_method_entry(
+                                        AbstractInterpreter::MethodKind kind) {
+  // determine code generation flags
+  bool synchronized = false;
+  address entry_point = NULL;
+
+  switch (kind) {
+  case Interpreter::zerolocals             :                                                      break;
+  case Interpreter::zerolocals_synchronized: synchronized = true;                                 break;
+  case Interpreter::native                 : entry_point = generate_native_entry(false); break;
+  case Interpreter::native_synchronized    : entry_point = generate_native_entry(true);  break;
+  case Interpreter::empty                  : entry_point = generate_empty_entry(); break;
+  case Interpreter::accessor               : entry_point = generate_accessor_entry(); break;
+  case Interpreter::abstract               : entry_point = generate_abstract_entry();    break;
+
+  case Interpreter::java_lang_math_sin     : // fall thru
+  case Interpreter::java_lang_math_cos     : // fall thru
+  case Interpreter::java_lang_math_tan     : // fall thru
+  case Interpreter::java_lang_math_abs     : // fall thru
+  case Interpreter::java_lang_math_log     : // fall thru
+  case Interpreter::java_lang_math_log10   : // fall thru
+  case Interpreter::java_lang_math_sqrt    : // fall thru
+  case Interpreter::java_lang_math_pow     : // fall thru
+  case Interpreter::java_lang_math_exp     : entry_point = generate_math_entry(kind);      break;
+  case Interpreter::java_lang_ref_reference_get
+                                           : entry_point = generate_Reference_get_entry(); break;
+#ifndef CC_INTERP
+  case Interpreter::java_util_zip_CRC32_update
+                                           : entry_point = generate_CRC32_update_entry();  break;
+  case Interpreter::java_util_zip_CRC32_updateBytes
+                                           : // fall thru
+  case Interpreter::java_util_zip_CRC32_updateByteBuffer
+                                           : entry_point = generate_CRC32_updateBytes_entry(kind); break;
+#endif // CC_INTERP
+  default:
+    fatal(err_msg("unexpected method kind: %d", kind));
+    break;
+  }
+
+  if (entry_point) {
+    return entry_point;
+  }
+
+  return generate_normal_entry(synchronized);
+}
--- a/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,9 +37,11 @@
 class InterpreterGenerator: public CC_INTERP_ONLY(CppInterpreterGenerator)
                                    NOT_CC_INTERP(TemplateInterpreterGenerator) {
 
-public:
+ public:
 
-InterpreterGenerator(StubQueue* _code);
+  InterpreterGenerator(StubQueue* _code);
+  // entry point generator
+  address generate_method_entry(AbstractInterpreter::MethodKind kind);
 
 #ifdef TARGET_ARCH_x86
 # include "interpreterGenerator_x86.hpp"
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -987,17 +987,6 @@
   int index = cp_entry->field_index();
   if ((ik->field_access_flags(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return;
 
-  switch(cp_entry->flag_state()) {
-    case btos:    // fall through
-    case ctos:    // fall through
-    case stos:    // fall through
-    case itos:    // fall through
-    case ftos:    // fall through
-    case ltos:    // fall through
-    case dtos:    // fall through
-    case atos: break;
-    default: ShouldNotReachHere(); return;
-  }
   bool is_static = (obj == NULL);
   HandleMark hm(thread);
 
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -364,7 +364,7 @@
 
 #define method_entry(kind)                                                                    \
   { CodeletMark cm(_masm, "method entry point (kind = " #kind ")");                    \
-    Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind);  \
+    Interpreter::_entry_table[Interpreter::kind] = ((InterpreterGenerator*)this)->generate_method_entry(Interpreter::kind);  \
   }
 
   // all non-native method kinds
--- a/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,9 +59,6 @@
   address generate_safept_entry_for(TosState state, address runtime_entry);
   void    generate_throw_exception();
 
-  // entry point generator
-//   address generate_method_entry(AbstractInterpreter::MethodKind kind);
-
   // Instruction generation
   void generate_and_dispatch (Template* t, TosState tos_out = ilgl);
   void set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep);
--- a/hotspot/src/share/vm/memory/allocation.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -422,26 +422,23 @@
 }
 
 //------------------------------Arena------------------------------------------
-NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
 
-Arena::Arena(size_t init_size) {
+Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0)  {
   size_t round_size = (sizeof (char *)) - 1;
   init_size = (init_size+round_size) & ~round_size;
   _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
-  _size_in_bytes = 0;
+  MemTracker::record_new_arena(flag);
   set_size_in_bytes(init_size);
-  NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }
 
-Arena::Arena() {
+Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
   _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
-  _size_in_bytes = 0;
+  MemTracker::record_new_arena(flag);
   set_size_in_bytes(Chunk::init_size);
-  NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }
 
 Arena *Arena::move_contents(Arena *copy) {
@@ -463,7 +460,7 @@
 
 Arena::~Arena() {
   destruct_contents();
-  NOT_PRODUCT(Atomic::dec(&_instance_count);)
+  MemTracker::record_arena_free(_flags);
 }
 
 void* Arena::operator new(size_t size) throw() {
@@ -479,21 +476,21 @@
   // dynamic memory type binding
 void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
 #ifdef ASSERT
-  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
+  void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
   if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
   return p;
 #else
-  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
+  return (void *) AllocateHeap(size, flags, CALLER_PC);
 #endif
 }
 
 void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
 #ifdef ASSERT
-  void* p = os::malloc(size, flags|otArena, CALLER_PC);
+  void* p = os::malloc(size, flags, CALLER_PC);
   if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
   return p;
 #else
-  return os::malloc(size, flags|otArena, CALLER_PC);
+  return os::malloc(size, flags, CALLER_PC);
 #endif
 }
 
@@ -518,8 +515,9 @@
 // change the size
 void Arena::set_size_in_bytes(size_t size) {
   if (_size_in_bytes != size) {
+    long delta = (long)(size - size_in_bytes());
     _size_in_bytes = size;
-    MemTracker::record_arena_size((address)this, size);
+    MemTracker::record_arena_size_change(delta, _flags);
   }
 }
 
--- a/hotspot/src/share/vm/memory/allocation.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,51 +133,34 @@
 
 
 /*
- * MemoryType bitmap layout:
- * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
- * |      memory type        |   object    | reserved    |
- * |                         |     type    |             |
+ * Memory types
  */
 enum MemoryType {
   // Memory type by sub systems. It occupies lower byte.
-  mtNone              = 0x0000,  // undefined
-  mtClass             = 0x0100,  // memory class for Java classes
-  mtThread            = 0x0200,  // memory for thread objects
-  mtThreadStack       = 0x0300,
-  mtCode              = 0x0400,  // memory for generated code
-  mtGC                = 0x0500,  // memory for GC
-  mtCompiler          = 0x0600,  // memory for compiler
-  mtInternal          = 0x0700,  // memory used by VM, but does not belong to
+  mtJavaHeap          = 0x00,  // Java heap
+  mtClass             = 0x01,  // memory class for Java classes
+  mtThread            = 0x02,  // memory for thread objects
+  mtThreadStack       = 0x03,
+  mtCode              = 0x04,  // memory for generated code
+  mtGC                = 0x05,  // memory for GC
+  mtCompiler          = 0x06,  // memory for compiler
+  mtInternal          = 0x07,  // memory used by VM, but does not belong to
                                  // any of above categories, and not used for
                                  // native memory tracking
-  mtOther             = 0x0800,  // memory not used by VM
-  mtSymbol            = 0x0900,  // symbol
-  mtNMT               = 0x0A00,  // memory used by native memory tracking
-  mtChunk             = 0x0B00,  // chunk that holds content of arenas
-  mtJavaHeap          = 0x0C00,  // Java heap
-  mtClassShared       = 0x0D00,  // class data sharing
-  mtTest              = 0x0E00,  // Test type for verifying NMT
-  mtTracing           = 0x0F00,  // memory used for Tracing
-  mt_number_of_types  = 0x000F,  // number of memory types (mtDontTrack
+  mtOther             = 0x08,  // memory not used by VM
+  mtSymbol            = 0x09,  // symbol
+  mtNMT               = 0x0A,  // memory used by native memory tracking
+  mtClassShared       = 0x0B,  // class data sharing
+  mtChunk             = 0x0C,  // chunk that holds content of arenas
+  mtTest              = 0x0D,  // Test type for verifying NMT
+  mtTracing           = 0x0E,  // memory used for Tracing
+  mtNone              = 0x0F,  // undefined
+  mt_number_of_types  = 0x10   // number of memory types (mtDontTrack
                                  // is not included as validate type)
-  mtDontTrack         = 0x0F00,  // memory we do not or cannot track
-  mt_masks            = 0x7F00,
-
-  // object type mask
-  otArena             = 0x0010, // an arena object
-  otNMTRecorder       = 0x0020, // memory recorder object
-  ot_masks            = 0x00F0
 };
 
-#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type)
-#define HAS_VALID_MEMORY_TYPE(flags)((flags & mt_masks) != mtNone)
-#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks)
+typedef MemoryType MEMFLAGS;
 
-#define IS_ARENA_OBJ(flags)         ((flags & ot_masks) == otArena)
-#define IS_NMT_RECORDER(flags)      ((flags & ot_masks) == otNMTRecorder)
-#define NMT_CAN_TRACK(flags)        (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))
-
-typedef unsigned short MEMFLAGS;
 
 #if INCLUDE_NMT
 
@@ -189,27 +172,23 @@
 
 #endif // INCLUDE_NMT
 
-// debug build does not inline
-#if defined(_NMT_NOINLINE_)
-  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
-  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(2) : 0)
-  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
-#else
-  #define CURRENT_PC      (NMT_track_callsite? os::get_caller_pc(0) : 0)
-  #define CALLER_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
-  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
-#endif
-
+class NativeCallStack;
 
 
 template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
+  _NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new(size_t size) throw();
   _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
-                               address caller_pc = 0) throw();
-  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
+                               const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant)
+                               throw();
+  _NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new [](size_t size) throw();
   _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
-                               address caller_pc = 0) throw();
+                               const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant)
+                               throw();
   void  operator delete(void* p);
   void  operator delete [] (void* p);
 };
@@ -384,13 +363,15 @@
 
 //------------------------------Arena------------------------------------------
 // Fast allocation of memory
-class Arena : public CHeapObj<mtNone|otArena> {
+class Arena : public CHeapObj<mtNone> {
 protected:
   friend class ResourceMark;
   friend class HandleMark;
   friend class NoHandleMark;
   friend class VMStructs;
 
+  MEMFLAGS    _flags;           // Memory tracking flags
+
   Chunk *_first;                // First chunk
   Chunk *_chunk;                // current chunk
   char *_hwm, *_max;            // High water mark and max in current chunk
@@ -418,8 +399,8 @@
  }
 
  public:
-  Arena();
-  Arena(size_t init_size);
+  Arena(MEMFLAGS memflag);
+  Arena(MEMFLAGS memflag, size_t init_size);
   ~Arena();
   void  destruct_contents();
   char* hwm() const             { return _hwm; }
@@ -518,8 +499,6 @@
   static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
   static void free_all(char** start, char** end)                                     PRODUCT_RETURN;
 
-  // how many arena instances
-  NOT_PRODUCT(static volatile jint _instance_count;)
 private:
   // Reset this Arena to empty, access will trigger grow if necessary
   void   reset(void) {
@@ -681,7 +660,7 @@
   NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)
 
 #define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
-  NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
+  NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)
 
 #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
   (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
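The net effect of the MemoryType rework above is that MEMFLAGS shrinks from a 16-bit bitmap combining a memory type with an object type to the plain enum itself; the otArena bit disappears and each Arena now records its category in the new _flags field. A condensed before/after comparison (usage lines are illustrative):

// Before this change: MEMFLAGS was an unsigned short bitmap, and arena-ness
// was encoded as an object-type bit alongside the memory type:
//   MEMFLAGS f = mtGC | otArena;              // 0x0500 | 0x0010 == 0x0510
//   bool arena = (f & ot_masks) == otArena;   // decoded with the old masks
//
// After this change: MEMFLAGS is the MemoryType enum itself, one value per
// category, and the Arena constructor takes the flag explicitly:
//   Arena* a = new Arena(mtGC);               // _flags == mtGC (0x05)
//   // NMT attributes the arena's chunks via _flags, not via bits in MEMFLAGS.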
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 #include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
+#include "services/memTracker.hpp"
 
 // Explicit C-heap memory management
 
@@ -49,12 +50,10 @@
 #endif
 
 // allocate using malloc; will fail if no memory available
-inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
+inline char* AllocateHeap(size_t size, MEMFLAGS flags,
+    const NativeCallStack& stack,
     AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
-  if (pc == 0) {
-    pc = CURRENT_PC;
-  }
-  char* p = (char*) os::malloc(size, flags, pc);
+  char* p = (char*) os::malloc(size, flags, stack);
   #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
   #endif
@@ -63,10 +62,14 @@
   }
   return p;
 }
+inline char* AllocateHeap(size_t size, MEMFLAGS flags,
+    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
+  return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
+}
 
-inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags,
+inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
     AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
-  char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
+  char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
   #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
   #endif
@@ -85,8 +88,22 @@
 
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
-      address caller_pc) throw() {
-    void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
+      const NativeCallStack& stack) throw() {
+  void* p = (void*)AllocateHeap(size, F, stack);
+#ifdef ASSERT
+  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
+#endif
+  return p;
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
+  return CHeapObj<F>::operator new(size, CALLER_PC);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
+  const std::nothrow_t&  nothrow_constant, const NativeCallStack& stack) throw() {
+  void* p = (void*)AllocateHeap(size, F, stack,
+      AllocFailStrategy::RETURN_NULL);
 #ifdef ASSERT
     if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
 #endif
@@ -94,23 +111,28 @@
   }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
-  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
-  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
-      AllocFailStrategy::RETURN_NULL);
-#ifdef ASSERT
-    if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
-#endif
-    return p;
+  const std::nothrow_t& nothrow_constant) throw() {
+  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
 }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
-      address caller_pc) throw() {
-    return CHeapObj<F>::operator new(size, caller_pc);
+      const NativeCallStack& stack) throw() {
+  return CHeapObj<F>::operator new(size, stack);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size)
+  throw() {
+  return CHeapObj<F>::operator new(size, CALLER_PC);
 }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
-  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
-    return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
+  const std::nothrow_t&  nothrow_constant, const NativeCallStack& stack) throw() {
+  return CHeapObj<F>::operator new(size, nothrow_constant, stack);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
+  const std::nothrow_t& nothrow_constant) throw() {
+  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
 }
 
 template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
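
The two AllocateHeap overloads keep the call-stack argument optional: a caller that already holds a NativeCallStack passes it through, while the shorter form captures CURRENT_PC at the inline call site. A hedged sketch of both call shapes (buffer names are illustrative):

    // Illustrative call sites for the two overloads.
    char* buf1 = AllocateHeap(1024, mtInternal);   // CURRENT_PC captured here
    char* buf2 = AllocateHeap(1024, mtInternal,
                              AllocFailStrategy::RETURN_NULL);
    if (buf2 != NULL) {            // RETURN_NULL mode may hand back NULL
      FreeHeap(buf2, mtInternal);
    }
    FreeHeap(buf1, mtInternal);
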
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -56,7 +56,7 @@
 #endif
   set_bs(_ct_bs);
   _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
-                         mtGC, 0, AllocFailStrategy::RETURN_NULL);
+                         mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
   if (_last_cur_val_in_gen == NULL) {
     vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
   }
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -176,13 +176,9 @@
 
   size_t alignment = GenRemSet::max_alignment_constraint();
 
-  // Parallel GC does its own alignment of the generations to avoid requiring a
-  // large page (256M on some platforms) for the permanent generation.  The
-  // other collectors should also be updated to do their own alignment and then
-  // this use of lcm() should be removed.
-  if (UseLargePages && !UseParallelGC) {
+  if (UseLargePages) {
       // In presence of large pages we have to make sure that our
-      // alignment is large page aware
+      // alignment is large page aware.
       alignment = lcm(os::large_page_size(), alignment);
   }
 
@@ -909,7 +905,8 @@
 }
 
 void MarkSweepPolicy::initialize_generations() {
-  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
+  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
+    AllocFailStrategy::RETURN_NULL);
   if (_generations == NULL) {
     vm_exit_during_initialization("Unable to allocate gen spec");
   }
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -790,7 +790,7 @@
 
   // Try allocating obj in to-space (unless too old)
   if (old->age() < tenuring_threshold()) {
-    obj = (oop) to()->allocate(s);
+    obj = (oop) to()->allocate_aligned(s);
   }
 
   // Otherwise try allocating obj tenured
--- a/hotspot/src/share/vm/memory/heapInspection.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/memory/heapInspection.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -135,7 +135,7 @@
   _ref = (HeapWord*) Universe::boolArrayKlassObj();
   _buckets =
     (KlassInfoBucket*)  AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
-                                            mtInternal, 0, AllocFailStrategy::RETURN_NULL);
+       mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
   if (_buckets != NULL) {
     _size = _num_buckets;
     for (int index = 0; index < _size; index++) {
--- a/hotspot/src/share/vm/memory/memRegion.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/memory/memRegion.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -103,11 +103,13 @@
 }
 
 void* MemRegion::operator new(size_t size) throw() {
-  return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
+  return (address)AllocateHeap(size, mtGC, CURRENT_PC,
+    AllocFailStrategy::RETURN_NULL);
 }
 
 void* MemRegion::operator new [](size_t size) throw() {
-  return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
+  return (address)AllocateHeap(size, mtGC, CURRENT_PC,
+    AllocFailStrategy::RETURN_NULL);
 }
 void  MemRegion::operator delete(void* p) {
   FreeHeap(p, mtGC);
--- a/hotspot/src/share/vm/memory/resourceArea.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/memory/resourceArea.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,11 +49,11 @@
   debug_only(static int _warned;)       // to suppress multiple warnings
 
 public:
-  ResourceArea() {
+  ResourceArea() : Arena(mtThread) {
     debug_only(_nesting = 0;)
   }
 
-  ResourceArea(size_t init_size) : Arena(init_size) {
+  ResourceArea(size_t init_size) : Arena(mtThread, init_size) {
     debug_only(_nesting = 0;);
   }
 
@@ -64,7 +64,7 @@
     if (UseMallocOnly) {
       // use malloc, but save pointer in res. area for later freeing
       char** save = (char**)internal_malloc_4(sizeof(char*));
-      return (*save = (char*)os::malloc(size, mtThread));
+      return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
     }
 #endif
     return (char*)Amalloc(size, alloc_failmode);
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -159,9 +159,9 @@
 Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
 
 void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
-  // The Thread work barrier is only needed by G1.
+  // The Thread work barrier is only needed by G1 Class Unloading.
   // No need to use the barrier if this is single-threaded code.
-  if (UseG1GC && n_workers > 0) {
+  if (UseG1GC && ClassUnloadingWithConcurrentMark && n_workers > 0) {
     uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
     if (new_value == n_workers) {
       // This thread is last. Notify the others.
@@ -172,6 +172,9 @@
 }
 
 void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
+  assert(UseG1GC,                          "Currently only used by G1");
+  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
+
   // No need to use the barrier if this is single-threaded code.
   if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
     MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
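
mark_worker_done_with_threads is a one-shot rendezvous: each worker bumps a shared counter with Atomic::add, and only the last one through takes the monitor and notifies the waiter. A standalone sketch of the same pattern in C++11 primitives (not HotSpot's Atomic/Monitor types), just to make the shape explicit:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    static std::atomic<unsigned> g_done{0};
    static std::mutex g_m;
    static std::condition_variable g_cv;

    void worker_done(unsigned n_workers) {
      if (g_done.fetch_add(1) + 1 == n_workers) {
        std::lock_guard<std::mutex> lk(g_m);
        g_cv.notify_all();         // last worker wakes the waiting thread
      }
    }

    void wait_for_workers(unsigned n_workers) {
      std::unique_lock<std::mutex> lk(g_m);
      g_cv.wait(lk, [&] { return g_done.load() == n_workers; });
    }
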
--- a/hotspot/src/share/vm/memory/space.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/memory/space.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -28,6 +28,7 @@
 #include "gc_implementation/shared/liveRange.hpp"
 #include "gc_implementation/shared/markSweep.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
 #include "memory/defNewGeneration.hpp"
 #include "memory/genCollectedHeap.hpp"
@@ -720,6 +721,27 @@
   } while (true);
 }
 
+HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
+  assert(Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), "not locked");
+  HeapWord* end_value = end();
+
+  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
+  if (obj == NULL) {
+    return NULL;
+  }
+
+  if (pointer_delta(end_value, obj) >= size) {
+    HeapWord* new_top = obj + size;
+    set_top(new_top);
+    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
+      "checking alignment");
+    return obj;
+  } else {
+    set_top(obj);
+    return NULL;
+  }
+}
+
 // Requires locking.
 HeapWord* ContiguousSpace::allocate(size_t size) {
   return allocate_impl(size, end());
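
allocate_aligned first rounds the current top up to SurvivorAlignmentInBytes and only then checks whether the request fits; note that the failure branch still publishes the aligned top, so the padding is consumed exactly once. A sketch of the assumed contract of align_allocation_or_fail (the real helper lives in collectedHeap.inline.hpp; this is an illustration, not its code):

    // Assumed contract: round 'top' up to 'alignment'; fail if even the
    // padding would cross 'end'.
    static HeapWord* align_or_fail(HeapWord* top, HeapWord* end,
                                   uintx alignment) {
      uintptr_t aligned = align_size_up((uintptr_t)top, alignment);
      return (aligned <= (uintptr_t)end) ? (HeapWord*)aligned : NULL;
    }
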
--- a/hotspot/src/share/vm/memory/space.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/memory/space.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -526,6 +526,7 @@
   // Allocation (return NULL if full)
   virtual HeapWord* allocate(size_t word_size);
   virtual HeapWord* par_allocate(size_t word_size);
+  HeapWord* allocate_aligned(size_t word_size);
 
   // Iteration
   void oop_iterate(ExtendedOopClosure* cl);
--- a/hotspot/src/share/vm/oops/method.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -283,6 +283,13 @@
   return bcp;
 }
 
+address Method::bcp_from(address bcp) const {
+  if (is_native() && bcp == NULL) {
+    return code_base();
+  } else {
+    return bcp;
+  }
+}
 
 int Method::size(bool is_native) {
   // If native, then include pointers for native_function and signature_handler
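
The new bcp_from(address) overload canonicalizes the byte code pointer of a native method, whose frames carry a NULL bcp, to code_base(), so callers converting a raw bcp to a bci no longer need to special-case native frames. An illustrative call shape (the surrounding accessor is hypothetical):

    // Hypothetical caller: normalize a frame's raw bcp before using it.
    address raw_bcp = interpreter_frame_bcp();   // may be NULL for natives
    address bcp     = method->bcp_from(raw_bcp); // NULL + native => code_base()
    int     bci     = method->bci_from(bcp);
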
--- a/hotspot/src/share/vm/oops/method.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/oops/method.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -648,7 +648,8 @@
 
   // Returns the byte code index from the byte code pointer
   int     bci_from(address bcp) const;
-  address bcp_from(int     bci) const;
+  address bcp_from(int bci) const;
+  address bcp_from(address bcp) const;
   int validate_bci_from_bcp(address bcp) const;
   int validate_bci(int bci) const;
 
--- a/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -55,8 +55,6 @@
   klass()->oop_follow_contents(cm, this);
 }
 
-// Used by parallel old GC.
-
 inline oop oopDesc::forward_to_atomic(oop p) {
   assert(ParNewGeneration::is_legal_forward_ptr(p),
          "illegal forwarding pointer value.");
--- a/hotspot/src/share/vm/opto/callGenerator.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -357,7 +357,7 @@
 
   // Make sure the state is a MergeMem for parsing.
   if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
-    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
+    Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
     C->initial_gvn()->set_type_bottom(mem);
     map->set_req(TypeFunc::Memory, mem);
   }
--- a/hotspot/src/share/vm/opto/callnode.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/callnode.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -688,7 +688,7 @@
     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
 
   case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
-    assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
+    assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
     // 2nd half of doubles and longs
     return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
 
@@ -778,7 +778,7 @@
 }
 
 // Returns the unique CheckCastPP of a call
-// or 'this' if there are several CheckCastPP
+// or 'this' if there are several CheckCastPP or unexpected uses
 // or returns NULL if there is no one.
 Node *CallNode::result_cast() {
   Node *cast = NULL;
@@ -794,6 +794,13 @@
         return this;  // more than 1 CheckCastPP
       }
       cast = use;
+    } else if (!use->is_Initialize() &&
+               !use->is_AddP()) {
+      // Expected uses are restricted to a CheckCastPP, an Initialize
+      // node, and AddP nodes. If we encounter any other use (a Phi
+      // node can be seen in rare cases), return 'this' to prevent
+      // incorrect optimizations.
+      return this;
     }
   }
   return cast;
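
With this change, result_cast() returns 'this' not only for multiple CheckCastPPs but for any unexpected user, so callers must treat 'this' as "no unique cast". An illustrative caller-side pattern, consistent with the macro.cpp assert added further down (which expects NULL or a CheckCastPP):

    // Illustrative defensive pattern at a call site.
    Node* res = alloc->result_cast();
    if (res == alloc) {
      // ambiguous or unexpected uses (e.g. a Phi): do not scalar-replace
    } else if (res != NULL) {
      assert(res->is_CheckCastPP(), "unique cast expected");
    }
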
--- a/hotspot/src/share/vm/opto/cfgnode.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,6 +108,7 @@
 
         rreq++;                 // One more input to Region
       } // Found a region to merge into Region
+      igvn->_worklist.push(r);
       // Clobber pointer to the now dead 'r'
       region->set_req(i, phase->C->top());
     }
@@ -449,6 +450,7 @@
   // Remove TOP or NULL input paths. If only 1 input path remains, this Region
   // degrades to a copy.
   bool add_to_worklist = false;
+  bool modified = false;
   int cnt = 0;                  // Count of values merging
   DEBUG_ONLY( int cnt_orig = req(); ) // Save original inputs count
   int del_it = 0;               // The last input path we delete
@@ -459,6 +461,7 @@
       // Remove useless control copy inputs
       if( n->is_Region() && n->as_Region()->is_copy() ) {
         set_req(i, n->nonnull_req());
+        modified = true;
         i--;
         continue;
       }
@@ -466,12 +469,14 @@
         Node *call = n->in(0);
         if (call->is_Call() && call->as_Call()->entry_point() == OptoRuntime::rethrow_stub()) {
           set_req(i, call->in(0));
+          modified = true;
           i--;
           continue;
         }
       }
       if( phase->type(n) == Type::TOP ) {
         set_req(i, NULL);       // Ignore TOP inputs
+        modified = true;
         i--;
         continue;
       }
@@ -691,7 +696,7 @@
     }
   }
 
-  return NULL;
+  return modified ? this : NULL;
 }
 
 
@@ -1871,7 +1876,7 @@
           igvn->register_new_node_with_optimizer(new_base);
           hook->add_req(new_base);
         }
-        MergeMemNode* result = MergeMemNode::make(phase->C, new_base);
+        MergeMemNode* result = MergeMemNode::make(new_base);
         for (uint i = 1; i < req(); ++i) {
           Node *ii = in(i);
           if (ii->is_MergeMem()) {
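
The new modified flag implements the Ideal() contract under iterative GVN: a transform that edits the node in place must return the node (or explicitly re-enqueue it) rather than NULL, otherwise the in-place change is invisible to IGVN and dependent nodes never get revisited. A hedged sketch of the general idiom (ExampleNode is hypothetical):

    // Hypothetical node showing the return-this-when-modified idiom.
    Node* ExampleNode::Ideal(PhaseGVN* phase, bool can_reshape) {
      bool modified = false;
      if (in(2) != NULL && phase->type(in(2)) == Type::TOP) {
        set_req(2, NULL);        // in-place edit
        modified = true;
      }
      // 'this' => IGVN re-processes the node; NULL => no progress made.
      return modified ? this : NULL;
    }
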
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1620,7 +1620,7 @@
           C->check_node_count(0, "out of nodes fixing spills");
           if (C->failing())  return;
           // Transform node
-          MachNode *cisc = mach->cisc_version(stk_offset, C)->as_Mach();
+          MachNode *cisc = mach->cisc_version(stk_offset)->as_Mach();
           cisc->set_req(inp,fp);          // Base register is frame pointer
           if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
             assert( cisc->oper_input_base() == 2, "Only adding one edge");
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -647,6 +647,10 @@
                   _printer(IdealGraphPrinter::printer()),
 #endif
                   _congraph(NULL),
+                  _comp_arena(mtCompiler),
+                  _node_arena(mtCompiler),
+                  _old_arena(mtCompiler),
+                  _Compile_types(mtCompiler),
                   _replay_inline_data(NULL),
                   _late_inlines(comp_arena(), 2, 0, NULL),
                   _string_late_inlines(comp_arena(), 2, 0, NULL),
@@ -954,6 +958,10 @@
     _in_dump_cnt(0),
     _printer(NULL),
 #endif
+    _comp_arena(mtCompiler),
+    _node_arena(mtCompiler),
+    _old_arena(mtCompiler),
+    _Compile_types(mtCompiler),
     _dead_node_list(comp_arena()),
     _dead_node_count(0),
     _congraph(NULL),
@@ -1039,6 +1047,7 @@
 
   _node_note_array = NULL;
   _default_node_notes = NULL;
+  DEBUG_ONLY( _modified_nodes = NULL; ) // Used in Optimize()
 
   _immutable_memory = NULL; // filled in at first inquiry
 
@@ -1247,6 +1256,18 @@
     }
   }
 }
+void Compile::record_modified_node(Node* n) {
+  if (_modified_nodes != NULL && !_inlining_incrementally &&
+      n->outcnt() != 0 && !n->is_Con()) {
+    _modified_nodes->push(n);
+  }
+}
+
+void Compile::remove_modified_node(Node* n) {
+  if (_modified_nodes != NULL) {
+    _modified_nodes->remove(n);
+  }
+}
 #endif
 
 #ifndef PRODUCT
@@ -2035,6 +2056,9 @@
   // Iterative Global Value Numbering, including ideal transforms
   // Initialize IterGVN with types and values from parse-time GVN
   PhaseIterGVN igvn(initial_gvn());
+#ifdef ASSERT
+  _modified_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
+#endif
   {
     NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); )
     igvn.optimize();
@@ -2197,6 +2221,7 @@
     }
   }
 
+  DEBUG_ONLY( _modified_nodes = NULL; )
  } // (End scope of igvn; run destructor if necessary for asserts.)
 
   process_print_inlining();
@@ -2825,7 +2850,7 @@
           // oops implicit null check is not generated.
           // This will allow to generate normal oop implicit null check.
           if (Matcher::gen_narrow_oop_implicit_null_checks())
-            new_in2 = ConNode::make(this, TypeNarrowOop::NULL_PTR);
+            new_in2 = ConNode::make(TypeNarrowOop::NULL_PTR);
           //
           // This transformation together with CastPP transformation above
           // will generated code for implicit NULL checks for compressed oops.
@@ -2864,9 +2889,9 @@
           //    NullCheck base_reg
           //
         } else if (t->isa_oopptr()) {
-          new_in2 = ConNode::make(this, t->make_narrowoop());
+          new_in2 = ConNode::make(t->make_narrowoop());
         } else if (t->isa_klassptr()) {
-          new_in2 = ConNode::make(this, t->make_narrowklass());
+          new_in2 = ConNode::make(t->make_narrowklass());
         }
       }
       if (new_in2 != NULL) {
@@ -2899,11 +2924,11 @@
       const Type* t = in1->bottom_type();
       if (t == TypePtr::NULL_PTR) {
         assert(t->isa_oopptr(), "null klass?");
-        n->subsume_by(ConNode::make(this, TypeNarrowOop::NULL_PTR), this);
+        n->subsume_by(ConNode::make(TypeNarrowOop::NULL_PTR), this);
       } else if (t->isa_oopptr()) {
-        n->subsume_by(ConNode::make(this, t->make_narrowoop()), this);
+        n->subsume_by(ConNode::make(t->make_narrowoop()), this);
       } else if (t->isa_klassptr()) {
-        n->subsume_by(ConNode::make(this, t->make_narrowklass()), this);
+        n->subsume_by(ConNode::make(t->make_narrowklass()), this);
       }
     }
     if (in1->outcnt() == 0) {
@@ -2964,7 +2989,7 @@
       if (d) {
         // Replace them with a fused divmod if supported
         if (Matcher::has_match_rule(Op_DivModI)) {
-          DivModINode* divmod = DivModINode::make(this, n);
+          DivModINode* divmod = DivModINode::make(n);
           d->subsume_by(divmod->div_proj(), this);
           n->subsume_by(divmod->mod_proj(), this);
         } else {
@@ -2984,7 +3009,7 @@
       if (d) {
         // Replace them with a fused divmod if supported
         if (Matcher::has_match_rule(Op_DivModL)) {
-          DivModLNode* divmod = DivModLNode::make(this, n);
+          DivModLNode* divmod = DivModLNode::make(n);
           d->subsume_by(divmod->div_proj(), this);
           n->subsume_by(divmod->mod_proj(), this);
         } else {
@@ -3010,7 +3035,7 @@
     if (n->req()-1 > 2) {
       // Replace many operand PackNodes with a binary tree for matching
       PackNode* p = (PackNode*) n;
-      Node* btp = p->binary_tree_pack(this, 1, n->req());
+      Node* btp = p->binary_tree_pack(1, n->req());
       n->subsume_by(btp, this);
     }
     break;
@@ -3035,11 +3060,11 @@
       if (t != NULL && t->is_con()) {
         juint shift = t->get_con();
         if (shift > mask) { // Unsigned cmp
-          n->set_req(2, ConNode::make(this, TypeInt::make(shift & mask)));
+          n->set_req(2, ConNode::make(TypeInt::make(shift & mask)));
         }
       } else {
         if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
-          Node* shift = new AndINode(in2, ConNode::make(this, TypeInt::make(mask)));
+          Node* shift = new AndINode(in2, ConNode::make(TypeInt::make(mask)));
           n->set_req(2, shift);
         }
       }
@@ -4031,6 +4056,7 @@
   int j = 0;
   int identical = 0;
   int i = 0;
+  bool modified = false;
   for (; i < _expensive_nodes->length()-1; i++) {
     assert(j <= i, "can't write beyond current index");
     if (_expensive_nodes->at(i)->Opcode() == _expensive_nodes->at(i+1)->Opcode()) {
@@ -4043,20 +4069,23 @@
       identical = 0;
     } else {
       Node* n = _expensive_nodes->at(i);
-      igvn.hash_delete(n);
-      n->set_req(0, NULL);
+      igvn.replace_input_of(n, 0, NULL);
       igvn.hash_insert(n);
+      modified = true;
     }
   }
   if (identical > 0) {
     _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
   } else if (_expensive_nodes->length() >= 1) {
     Node* n = _expensive_nodes->at(i);
-    igvn.hash_delete(n);
-    n->set_req(0, NULL);
+    igvn.replace_input_of(n, 0, NULL);
     igvn.hash_insert(n);
+    modified = true;
   }
   _expensive_nodes->trunc_to(j);
+  if (modified) {
+    igvn.optimize();
+  }
 }
 
 void Compile::add_expensive_node(Node * n) {
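
_modified_nodes gives debug builds a record of nodes whose inputs changed, so a later pass can check that each one was re-enqueued on the IGVN worklist; record_modified_node deliberately skips constants, dead nodes, and the incremental-inlining phase, where re-processing is handled separately. A hedged sketch of the kind of check this bookkeeping enables (the verification loop is illustrative; the patch shows only the recording side):

    #ifdef ASSERT
    // Illustrative check: every recorded node should be dead or queued.
    static void verify_modified_nodes(Compile* C, PhaseIterGVN& igvn) {
      Unique_Node_List* modified = C->modified_nodes();
      if (modified == NULL) return;
      while (modified->size() > 0) {
        Node* n = modified->pop();
        assert(n->outcnt() == 0 || igvn._worklist.member(n),
               "modified node never made it back onto the IGVN worklist");
      }
    }
    #endif
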
--- a/hotspot/src/share/vm/opto/compile.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -344,6 +344,8 @@
   VectorSet             _dead_node_list;        // Set of dead nodes
   uint                  _dead_node_count;       // Number of dead nodes; VectorSet::Size() is O(N).
                                                 // So use this to keep count and make the call O(1).
+  DEBUG_ONLY( Unique_Node_List* _modified_nodes; )  // List of nodes whose inputs were modified
+
   debug_only(static int _debug_idx;)            // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
   Arena                 _node_arena;            // Arena for new-space Nodes
   Arena                 _old_arena;             // Arena for old-space Nodes, lifetime during xform
@@ -766,6 +768,11 @@
   void         print_missing_nodes();
 #endif
 
+  // Record modified nodes to check that they are put on the IGVN worklist
+  void         record_modified_node(Node* n) NOT_DEBUG_RETURN;
+  void         remove_modified_node(Node* n) NOT_DEBUG_RETURN;
+  DEBUG_ONLY( Unique_Node_List*   modified_nodes() const { return _modified_nodes; } )
+
   // Constant table
   ConstantTable&   constant_table() { return _constant_table; }
 
--- a/hotspot/src/share/vm/opto/connode.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/connode.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,7 @@
 }
 
 //------------------------------make-------------------------------------------
-ConNode *ConNode::make( Compile* C, const Type *t ) {
+ConNode *ConNode::make(const Type *t) {
   switch( t->basic_type() ) {
   case T_INT:         return new ConINode( t->is_int() );
   case T_LONG:        return new ConLNode( t->is_long() );
--- a/hotspot/src/share/vm/opto/connode.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/connode.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@
   virtual const RegMask &in_RegMask(uint) const { return RegMask::Empty; }
 
   // Polymorphic factory method:
-  static ConNode* make( Compile* C, const Type *t );
+  static ConNode* make(const Type *t);
 };
 
 //------------------------------ConINode---------------------------------------
@@ -57,7 +57,7 @@
   virtual int Opcode() const;
 
   // Factory method:
-  static ConINode* make( Compile* C, int con ) {
+  static ConINode* make(int con) {
     return new ConINode( TypeInt::make(con) );
   }
 
@@ -71,7 +71,7 @@
   virtual int Opcode() const;
 
   // Factory methods:
-  static ConPNode* make( Compile *C ,address con ) {
+  static ConPNode* make(address con) {
     if (con == NULL)
       return new ConPNode( TypePtr::NULL_PTR ) ;
     else
@@ -105,7 +105,7 @@
   virtual int Opcode() const;
 
   // Factory method:
-  static ConLNode* make( Compile *C ,jlong con ) {
+  static ConLNode* make(jlong con) {
     return new ConLNode( TypeLong::make(con) );
   }
 
@@ -119,7 +119,7 @@
   virtual int Opcode() const;
 
   // Factory method:
-  static ConFNode* make( Compile *C, float con  ) {
+  static ConFNode* make(float con) {
     return new ConFNode( TypeF::make(con) );
   }
 
@@ -133,7 +133,7 @@
   virtual int Opcode() const;
 
   // Factory method:
-  static ConDNode* make( Compile *C, double con ) {
+  static ConDNode* make(double con) {
     return new ConDNode( TypeD::make(con) );
   }
 
--- a/hotspot/src/share/vm/opto/divnode.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/divnode.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -479,7 +479,10 @@
 
   if (i == 0) return NULL;      // Dividing by zero constant does not idealize
 
-  set_req(0,NULL);              // Dividing by a not-zero constant; no faulting
+  if (in(0) != NULL) {
+    phase->igvn_rehash_node_delayed(this);
+    set_req(0, NULL);           // Dividing by a not-zero constant; no faulting
+  }
 
   // Dividing by MININT does not optimize as a power-of-2 shift.
   if( i == min_jint ) return NULL;
@@ -578,7 +581,10 @@
 
   if (l == 0) return NULL;      // Dividing by zero constant does not idealize
 
-  set_req(0,NULL);              // Dividing by a not-zero constant; no faulting
+  if (in(0) != NULL) {
+    phase->igvn_rehash_node_delayed(this);
+    set_req(0, NULL);           // Dividing by a not-zero constant; no faulting
+  }
 
   // Dividing by MINLONG does not optimize as a power-of-2 shift.
   if( l == min_jlong ) return NULL;
@@ -1274,7 +1280,7 @@
 }
 
 //------------------------------make------------------------------------------
-DivModINode* DivModINode::make(Compile* C, Node* div_or_mod) {
+DivModINode* DivModINode::make(Node* div_or_mod) {
   Node* n = div_or_mod;
   assert(n->Opcode() == Op_DivI || n->Opcode() == Op_ModI,
          "only div or mod input pattern accepted");
@@ -1286,7 +1292,7 @@
 }
 
 //------------------------------make------------------------------------------
-DivModLNode* DivModLNode::make(Compile* C, Node* div_or_mod) {
+DivModLNode* DivModLNode::make(Node* div_or_mod) {
   Node* n = div_or_mod;
   assert(n->Opcode() == Op_DivL || n->Opcode() == Op_ModL,
          "only div or mod input pattern accepted");
--- a/hotspot/src/share/vm/opto/divnode.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/divnode.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -168,7 +168,7 @@
   virtual Node *match( const ProjNode *proj, const Matcher *m );
 
   // Make a divmod and associated projections from a div or mod.
-  static DivModINode* make(Compile* C, Node* div_or_mod);
+  static DivModINode* make(Node* div_or_mod);
 };
 
 //------------------------------DivModLNode---------------------------------------
@@ -181,7 +181,7 @@
   virtual Node *match( const ProjNode *proj, const Matcher *m );
 
   // Make a divmod and associated projections from a div or mod.
-  static DivModLNode* make(Compile* C, Node* div_or_mod);
+  static DivModLNode* make(Node* div_or_mod);
 };
 
 #endif // SHARE_VM_OPTO_DIVNODE_HPP
--- a/hotspot/src/share/vm/opto/escape.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/escape.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1452,7 +1452,6 @@
     return 0;
 
   InitializeNode* ini = alloc->as_Allocate()->initialization();
-  Compile* C = _compile;
   bool visited_bottom_offset = false;
   GrowableArray<int> offsets_worklist;
 
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -591,7 +591,7 @@
         C->log()->elem("hot_throw preallocated='1' reason='%s'",
                        Deoptimization::trap_reason_name(reason));
       const TypeInstPtr* ex_con  = TypeInstPtr::make(ex_obj);
-      Node*              ex_node = _gvn.transform( ConNode::make(C, ex_con) );
+      Node*              ex_node = _gvn.transform(ConNode::make(ex_con));
 
       // Clear the detail message of the preallocated exception object.
       // Weblogic sometimes mutates the detail message of exceptions
@@ -706,7 +706,7 @@
   if (map() == NULL)  return NULL;
 
   // Clone the memory edge first
-  Node* mem = MergeMemNode::make(C, map()->memory());
+  Node* mem = MergeMemNode::make(map()->memory());
   gvn().set_type_bottom(mem);
 
   SafePointNode *clonemap = (SafePointNode*)map()->clone();
@@ -1135,7 +1135,7 @@
     return longcon((julong) offset_con);
   }
   Node* conv = _gvn.transform( new ConvI2LNode(offset));
-  Node* mask = _gvn.transform( ConLNode::make(C, (julong) max_juint) );
+  Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
   return _gvn.transform( new AndLNode(conv, mask) );
 }
 
@@ -1435,7 +1435,7 @@
 
 //------------------------------set_all_memory---------------------------------
 void GraphKit::set_all_memory(Node* newmem) {
-  Node* mergemem = MergeMemNode::make(C, newmem);
+  Node* mergemem = MergeMemNode::make(newmem);
   gvn().set_type_bottom(mergemem);
   map()->set_memory(mergemem);
 }
@@ -1464,9 +1464,9 @@
   Node* mem = memory(adr_idx);
   Node* ld;
   if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
+    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo);
   } else if (require_atomic_access && bt == T_DOUBLE) {
-    ld = LoadDNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
+    ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo);
   } else {
     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
   }
@@ -1488,9 +1488,9 @@
   Node *mem = memory(adr_idx);
   Node* st;
   if (require_atomic_access && bt == T_LONG) {
-    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
+    st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
   } else if (require_atomic_access && bt == T_DOUBLE) {
-    st = StoreDNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
+    st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
   } else {
     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   }
@@ -2084,9 +2084,9 @@
 void GraphKit::round_double_arguments(ciMethod* dest_method) {
   // (Note:  TypeFunc::make has a cache that makes this fast.)
   const TypeFunc* tf    = TypeFunc::make(dest_method);
-  int             nargs = tf->_domain->_cnt - TypeFunc::Parms;
+  int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
   for (int j = 0; j < nargs; j++) {
-    const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
+    const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
     if( targ->basic_type() == T_DOUBLE ) {
       // If any parameters are doubles, they must be rounded before
       // the call, dstore_rounding does gvn.transform
@@ -2188,10 +2188,10 @@
     return;
   }
   const TypeFunc* tf    = TypeFunc::make(dest_method);
-  int             nargs = tf->_domain->_cnt - TypeFunc::Parms;
+  int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
   int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
   for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
-    const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
+    const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
     if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) {
       bool maybe_null = true;
       ciKlass* better_type = NULL;
@@ -3364,7 +3364,7 @@
     // This will allow us to observe initializations when they occur,
     // and link them properly (as a group) to the InitializeNode.
     assert(init->in(InitializeNode::Memory) == malloc, "");
-    MergeMemNode* minit_in = MergeMemNode::make(C, malloc);
+    MergeMemNode* minit_in = MergeMemNode::make(malloc);
     init->set_req(InitializeNode::Memory, minit_in);
     record_for_igvn(minit_in); // fold it up later, if possible
     Node* minit_out = memory(rawidx);
--- a/hotspot/src/share/vm/opto/idealKit.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/idealKit.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -320,7 +320,7 @@
   Node* ns = new_cvstate();
   for (uint i = 0; i < ns->req(); i++) ns->init_req(i, _cvstate->in(i));
   // We must clone memory since it will be updated as we do stores.
-  ns->set_req(TypeFunc::Memory, MergeMemNode::make(C, ns->in(TypeFunc::Memory)));
+  ns->set_req(TypeFunc::Memory, MergeMemNode::make(ns->in(TypeFunc::Memory)));
   return ns;
 }
 
@@ -359,7 +359,7 @@
   Node* mem = memory(adr_idx);
   Node* ld;
   if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, MemNode::unordered);
+    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, MemNode::unordered);
   } else {
     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, MemNode::unordered);
   }
@@ -375,7 +375,7 @@
   Node *mem = memory(adr_idx);
   Node* st;
   if (require_atomic_access && bt == T_LONG) {
-    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
+    st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
   } else {
     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   }
--- a/hotspot/src/share/vm/opto/lcm.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -464,7 +464,9 @@
         iop == Op_CreateEx ||   // Create-exception must start block
         iop == Op_CheckCastPP
         ) {
-      worklist.map(i,worklist.pop());
+      // Select the node n.
+      // Remove n from the worklist and retain the order of the remaining nodes.
+      worklist.remove((uint)i);
       return n;
     }
 
@@ -550,7 +552,9 @@
   assert(idx >= 0, "index should be set");
   Node *n = worklist[(uint)idx];      // Get the winner
 
-  worklist.map((uint)idx, worklist.pop());     // Compress worklist
+  // Select the node n.
+  // Remove n from the worklist and retain the order of the remaining nodes.
+  worklist.remove((uint)idx);
   return n;
 }
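
The replaced idiom worklist.map(i, worklist.pop()) removes element i by overwriting it with the last element, O(1) but order-scrambling; worklist.remove(i) shifts the tail down instead, preserving the priority order the selection loop just computed. A standalone illustration with a plain vector (not HotSpot's Node_List):

    #include <cstdio>
    #include <vector>

    int main() {
      const std::vector<int> a = {10, 20, 30, 40};

      std::vector<int> swap_last = a;   // old idiom: map(i, pop())
      swap_last[1] = swap_last.back();
      swap_last.pop_back();             // order is now 10 40 30

      std::vector<int> shift = a;       // new idiom: remove(i)
      shift.erase(shift.begin() + 1);   // order stays 10 30 40

      std::printf("swap:  %d %d %d\n", swap_last[0], swap_last[1], swap_last[2]);
      std::printf("shift: %d %d %d\n", shift[0], shift[1], shift[2]);
      return 0;
    }
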
 
--- a/hotspot/src/share/vm/opto/library_call.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1905,7 +1905,7 @@
     Node *bolyplus1 = _gvn.transform(new BoolNode( cmpyplus1, BoolTest::eq ));
     Node* correctedsign = NULL;
     if (ConditionalMoveLimit != 0) {
-      correctedsign = _gvn.transform( CMoveNode::make(C, NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG));
+      correctedsign = _gvn.transform(CMoveNode::make(NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG));
     } else {
       IfNode *ifyplus1 = create_and_xform_if(ylong_path,bolyplus1, PROB_FAIR, COUNT_UNKNOWN);
       RegionNode *r = new RegionNode(3);
@@ -1934,7 +1934,7 @@
     // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
     Node *signresult = NULL;
     if (ConditionalMoveLimit != 0) {
-      signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE));
+      signresult = _gvn.transform(CMoveNode::make(NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE));
     } else {
       IfNode *ifyeven = create_and_xform_if(ylong_path,bol3, PROB_FAIR, COUNT_UNKNOWN);
       RegionNode *r = new RegionNode(3);
@@ -2268,7 +2268,7 @@
   // which could hinder other optimizations.
   // Since Math.min/max is often used with arraycopy, we want
   // tightly_coupled_allocation to be able to see beyond min/max expressions.
-  Node* cmov = CMoveNode::make(C, NULL, best_bol,
+  Node* cmov = CMoveNode::make(NULL, best_bol,
                                answer_if_false, answer_if_true,
                                TypeInt::make(lo, hi, widen));
 
--- a/hotspot/src/share/vm/opto/loopPredicate.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/loopPredicate.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -107,8 +107,7 @@
     rgn = new RegionNode(1);
     rgn->add_req(uncommon_proj);
     register_control(rgn, loop, uncommon_proj);
-    _igvn.hash_delete(call);
-    call->set_req(0, rgn);
+    _igvn.replace_input_of(call, 0, rgn);
     // When called from beautify_loops() idom is not constructed yet.
     if (_idom != NULL) {
       set_idom(call, rgn, dom_depth(rgn));
@@ -166,8 +165,7 @@
 
   if (new_entry == NULL) {
     // Attach if_cont to iff
-    _igvn.hash_delete(iff);
-    iff->set_req(0, if_cont);
+    _igvn.replace_input_of(iff, 0, if_cont);
     if (_idom != NULL) {
       set_idom(iff, if_cont, dom_depth(iff));
     }
@@ -194,8 +192,7 @@
     rgn = new RegionNode(1);
     register_new_node_with_optimizer(rgn);
     rgn->add_req(uncommon_proj);
-    hash_delete(call);
-    call->set_req(0, rgn);
+    replace_input_of(call, 0, rgn);
   } else {
     // Find region's edge corresponding to uncommon_proj
     for (; proj_index < rgn->req(); proj_index++)
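
Each hash_delete(n) / n->set_req(i, x) pair in these loop-opts hunks collapses into _igvn.replace_input_of(n, i, x), which unhashes the node, performs the edit, and re-enqueues the node for another IGVN pass in one step. The before/after shape, as a sketch:

    // Before: manual unhash + edit; re-hash/re-enqueue is the caller's job.
    _igvn.hash_delete(n);
    n->set_req(1, new_in);

    // After: one helper does the unhash, the edit, and the re-enqueue.
    _igvn.replace_input_of(n, 1, new_in);
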
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -924,15 +924,13 @@
   if( bol->outcnt() != 1 ) {
     bol = bol->clone();
     register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
-    _igvn.hash_delete(main_end);
-    main_end->set_req(CountedLoopEndNode::TestValue, bol);
+    _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, bol);
   }
   // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
   if( cmp->outcnt() != 1 ) {
     cmp = cmp->clone();
     register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
-    _igvn.hash_delete(bol);
-    bol->set_req(1, cmp);
+    _igvn.replace_input_of(bol, 1, cmp);
   }
 
   //------------------------------
@@ -1118,8 +1116,7 @@
     Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
     BoolNode* new_bol0 = new BoolNode(pre_bol->in(1), new_test);
     register_new_node( new_bol0, pre_head->in(0) );
-    _igvn.hash_delete(pre_end);
-    pre_end->set_req(CountedLoopEndNode::TestValue, new_bol0);
+    _igvn.replace_input_of(pre_end, CountedLoopEndNode::TestValue, new_bol0);
     // Modify main loop guard condition
     assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
     BoolNode* new_bol1 = new BoolNode(min_bol->in(1), new_test);
@@ -1130,8 +1127,7 @@
     BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
     BoolNode* new_bol2 = new BoolNode(main_bol->in(1), new_test);
     register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
-    _igvn.hash_delete(main_end);
-    main_end->set_req(CountedLoopEndNode::TestValue, new_bol2);
+    _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, new_bol2);
   }
 
   // Flag main loop
@@ -1346,8 +1342,7 @@
         Node* bol2 = loop_end->in(1)->clone();
         bol2->set_req(1, cmp2);
         register_new_node(bol2, ctrl2);
-        _igvn.hash_delete(loop_end);
-        loop_end->set_req(1, bol2);
+        _igvn.replace_input_of(loop_end, 1, bol2);
       }
       // Step 3: Find the min-trip test guaranteed before a 'main' loop.
       // Make it a 1-trip test (means at least 2 trips).
@@ -1356,8 +1351,7 @@
       // can edit it's inputs directly.  Hammer in the new limit for the
       // minimum-trip guard.
       assert(opaq->outcnt() == 1, "");
-      _igvn.hash_delete(opaq);
-      opaq->set_req(1, new_limit);
+      _igvn.replace_input_of(opaq, 1, new_limit);
     }
 
     // Adjust max trip count. The trip count is intentionally rounded
@@ -1407,8 +1401,7 @@
     register_new_node( cmp2, ctrl2 );
     Node *bol2 = new BoolNode( cmp2, loop_end->test_trip() );
     register_new_node( bol2, ctrl2 );
-    _igvn.hash_delete(loop_end);
-    loop_end->set_req(CountedLoopEndNode::TestValue, bol2);
+    _igvn.replace_input_of(loop_end, CountedLoopEndNode::TestValue, bol2);
 
     // Step 3: Find the min-trip test guaranteed before a 'main' loop.
     // Make it a 1-trip test (means at least 2 trips).
@@ -1997,8 +1990,7 @@
                                  : (Node*)new MaxINode(pre_limit, orig_limit);
     register_new_node(pre_limit, pre_ctrl);
   }
-  _igvn.hash_delete(pre_opaq);
-  pre_opaq->set_req(1, pre_limit);
+  _igvn.replace_input_of(pre_opaq, 1, pre_limit);
 
   // Note:: we are making the main loop limit no longer precise;
   // need to round up based on stride.
@@ -2027,10 +2019,9 @@
   Node *main_bol = main_cle->in(1);
   // Hacking loop bounds; need private copies of exit test
   if( main_bol->outcnt() > 1 ) {// BoolNode shared?
-    _igvn.hash_delete(main_cle);
     main_bol = main_bol->clone();// Clone a private BoolNode
     register_new_node( main_bol, main_cle->in(0) );
-    main_cle->set_req(1,main_bol);
+    _igvn.replace_input_of(main_cle, 1, main_bol);
   }
   Node *main_cmp = main_bol->in(1);
   if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -133,7 +133,7 @@
   // Return earliest legal location
   assert(early == find_non_split_ctrl(early), "unexpected early control");
 
-  if (n->is_expensive()) {
+  if (n->is_expensive() && !_verify_only && !_verify_me) {
     assert(n->in(0), "should have control input");
     early = get_early_ctrl_for_expensive(n, early);
   }
@@ -226,8 +226,7 @@
   }
 
   if (ctl != n->in(0)) {
-    _igvn.hash_delete(n);
-    n->set_req(0, ctl);
+    _igvn.replace_input_of(n, 0, ctl);
     _igvn.hash_insert(n);
   }
 
@@ -521,8 +520,7 @@
     assert(check_iff->in(1)->Opcode() == Op_Conv2B &&
            check_iff->in(1)->in(1)->Opcode() == Op_Opaque1, "");
     Node* opq = check_iff->in(1)->in(1);
-    _igvn.hash_delete(opq);
-    opq->set_req(1, bol);
+    _igvn.replace_input_of(opq, 1, bol);
     // Update ctrl.
     set_ctrl(opq, check_iff->in(0));
     set_ctrl(check_iff->in(1), check_iff->in(0));
@@ -690,7 +688,7 @@
   incr->set_req(2,stride);
   incr = _igvn.register_new_node_with_optimizer(incr);
   set_early_ctrl( incr );
-  _igvn.hash_delete(phi);
+  _igvn.rehash_node_delayed(phi);
   phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn );
 
   // If phi type is more restrictive than Int, raise to
@@ -743,8 +741,8 @@
     iffalse = iff2;
     iftrue  = ift2;
   } else {
-    _igvn.hash_delete(iffalse);
-    _igvn.hash_delete(iftrue);
+    _igvn.rehash_node_delayed(iffalse);
+    _igvn.rehash_node_delayed(iftrue);
     iffalse->set_req_X( 0, le, &_igvn );
     iftrue ->set_req_X( 0, le, &_igvn );
   }
@@ -1257,6 +1255,7 @@
       _head->del_req(i);
     }
   }
+  igvn.rehash_node_delayed(_head);
   // Transform landing pad
   igvn.register_new_node_with_optimizer(landing_pad, _head);
   // Insert landing pad into the header
@@ -1397,7 +1396,7 @@
   igvn.register_new_node_with_optimizer(r, _head);
   // Plug region into end of loop _head, followed by hot_tail
   while( _head->req() > 3 ) _head->del_req( _head->req()-1 );
-  _head->set_req(2, r);
+  igvn.replace_input_of(_head, 2, r);
   if( hot_idx ) _head->add_req(hot_tail);
 
   // Split all the Phis up between '_head' loop and the Region 'r'
@@ -1419,7 +1418,7 @@
       igvn.register_new_node_with_optimizer(phi, n);
       // Add the merge phi to the old Phi
       while( n->req() > 3 ) n->del_req( n->req()-1 );
-      n->set_req(2, phi);
+      igvn.replace_input_of(n, 2, phi);
       if( hot_idx ) n->add_req(hot_phi);
     }
   }
@@ -1495,13 +1494,14 @@
   if( fall_in_cnt > 1 ) {
     // Since I am just swapping inputs I do not need to update def-use info
     Node *tmp = _head->in(1);
+    igvn.rehash_node_delayed(_head);
     _head->set_req( 1, _head->in(fall_in_cnt) );
     _head->set_req( fall_in_cnt, tmp );
     // Swap also all Phis
     for (DUIterator_Fast imax, i = _head->fast_outs(imax); i < imax; i++) {
       Node* phi = _head->fast_out(i);
       if( phi->is_Phi() ) {
-        igvn.hash_delete(phi); // Yank from hash before hacking edges
+        igvn.rehash_node_delayed(phi); // Yank from hash before hacking edges
         tmp = phi->in(1);
         phi->set_req( 1, phi->in(fall_in_cnt) );
         phi->set_req( fall_in_cnt, tmp );
@@ -2905,6 +2905,7 @@
           uint k = 0;             // Probably cfg->in(0)
           while( cfg->in(k) != m ) k++; // But check in case cfg is a Region
           cfg->set_req( k, if_t ); // Now point to NeverBranch
+          _igvn._worklist.push(cfg);
 
           // Now create the never-taken loop exit
           Node *if_f = new CProjNode( iff, 1 );
--- a/hotspot/src/share/vm/opto/loopopts.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/loopopts.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -625,7 +625,7 @@
         set_ctrl(inp, cmov_ctrl);
       }
     }
-    Node *cmov = CMoveNode::make( C, cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi) );
+    Node *cmov = CMoveNode::make(cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
     register_new_node( cmov, cmov_ctrl );
     _igvn.replace_node( phi, cmov );
 #ifndef PRODUCT
@@ -2574,7 +2574,7 @@
   new_head->set_unswitch_count(head->unswitch_count()); // Preserve
   _igvn.register_new_node_with_optimizer(new_head);
   assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
-  first_not_peeled->set_req(0, new_head);
+  _igvn.replace_input_of(first_not_peeled, 0, new_head);
   set_loop(new_head, loop);
   loop->_body.push(new_head);
   not_peel.set(new_head->_idx);
--- a/hotspot/src/share/vm/opto/machnode.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/machnode.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -178,7 +178,7 @@
 }
 
 // Return an equivalent instruction using memory for cisc_operand position
-MachNode *MachNode::cisc_version(int offset, Compile* C) {
+MachNode *MachNode::cisc_version(int offset) {
   ShouldNotCallThis();
   return NULL;
 }
@@ -411,7 +411,7 @@
 
 //------------------------------peephole---------------------------------------
 // Apply peephole rule(s) to this instruction
-MachNode *MachNode::peephole( Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile* C ) {
+MachNode *MachNode::peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted) {
   return NULL;
 }
 
--- a/hotspot/src/share/vm/opto/machnode.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/machnode.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -152,7 +152,7 @@
   virtual uint  cmp( const MachOper &oper ) const;
 
   // Virtual clone, since I do not know how big the MachOper is.
-  virtual MachOper *clone(Compile* C) const = 0;
+  virtual MachOper *clone() const = 0;
 
   // Return ideal Type from simple operands.  Fail for complex operands.
   virtual const Type *type() const;
@@ -202,10 +202,10 @@
   // Copy inputs and operands to new node of instruction.
   // Called from cisc_version() and short_branch_version().
   // !!!! The method's body is defined in ad_<arch>.cpp file.
-  void fill_new_machnode(MachNode *n, Compile* C) const;
+  void fill_new_machnode(MachNode *n) const;
 
   // Return an equivalent instruction using memory for cisc_operand position
-  virtual MachNode *cisc_version(int offset, Compile* C);
+  virtual MachNode *cisc_version(int offset);
   // Modify this instruction's register mask to use stack version for cisc_operand
   virtual void use_cisc_RegMask();
 
@@ -317,7 +317,7 @@
   virtual const class TypePtr *adr_type() const;
 
   // Apply peephole rule(s) to this instruction
-  virtual MachNode *peephole( Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile* C );
+  virtual MachNode *peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted);
 
   // Top-level ideal Opcode matched
   virtual int ideal_Opcode()     const { return Op_Node; }
@@ -627,7 +627,7 @@
   virtual void save_label(Label** label, uint* block_num) = 0;
 
   // Support for short branches
-  virtual MachNode *short_branch_version(Compile* C) { return NULL; }
+  virtual MachNode *short_branch_version() { return NULL; }
 
   virtual bool pinned() const { return true; };
 };
@@ -985,7 +985,7 @@
 
   labelOper(labelOper* l) : _label(l->_label) , _block_num(l->_block_num) {}
 
-  virtual MachOper *clone(Compile* C) const;
+  virtual MachOper *clone() const;
 
   virtual Label *label() const { assert(_label != NULL, "need Label"); return _label; }
 
@@ -1012,7 +1012,7 @@
   methodOper() :   _method(0) {}
   methodOper(intptr_t method) : _method(method)  {}
 
-  virtual MachOper *clone(Compile* C) const;
+  virtual MachOper *clone() const;
 
   virtual intptr_t method() const { return _method; }
 
--- a/hotspot/src/share/vm/opto/macro.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/macro.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -702,6 +702,7 @@
   ciType* elem_type;
 
   Node* res = alloc->result_cast();
+  assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result");
   const TypeOopPtr* res_type = NULL;
   if (res != NULL) { // Could be NULL when there are no users
     res_type = _igvn.type(res)->isa_oopptr();
@@ -791,6 +792,7 @@
         for (int k = 0;  k < j; k++) {
           sfpt->del_req(last--);
         }
+        _igvn._worklist.push(sfpt);
         // rollback processed safepoints
         while (safepoints_done.length() > 0) {
           SafePointNode* sfpt_done = safepoints_done.pop();
@@ -815,6 +817,7 @@
               }
             }
           }
+          _igvn._worklist.push(sfpt_done);
         }
 #ifndef PRODUCT
         if (PrintEliminateAllocations) {
@@ -855,6 +858,7 @@
     int start = jvms->debug_start();
     int end   = jvms->debug_end();
     sfpt->replace_edges_in_range(res, sobj, start, end);
+    _igvn._worklist.push(sfpt);
     safepoints_done.append_if_missing(sfpt); // keep it for rollback
   }
   return true;
@@ -1034,6 +1038,8 @@
     return false;
   }
 
+  assert(boxing->result_cast() == NULL, "unexpected boxing node result");
+
   extract_call_projections(boxing);
 
   const TypeTuple* r = boxing->tf()->range();
@@ -1775,6 +1781,7 @@
       Node *pf_region = new RegionNode(3);
       Node *pf_phi_rawmem = new PhiNode( pf_region, Type::MEMORY,
                                              TypeRawPtr::BOTTOM );
+      transform_later(pf_region);
 
       // Generate several prefetch instructions.
       uint lines = (length != NULL) ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
--- a/hotspot/src/share/vm/opto/matcher.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -305,7 +305,7 @@
   // to avoid false sharing if the corresponding mach node is not used.
   // The corresponding mach node is only used in rare cases for derived
   // pointers.
-  Node* new_ideal_null = ConNode::make(C, TypePtr::NULL_PTR);
+  Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);
 
   // Swap out to old-space; emptying new-space
   Arena *old = C->node_arena()->move_contents(C->old_arena());
@@ -1643,8 +1643,8 @@
   }
 
   // Build the object to represent this state & prepare for recursive calls
-  MachNode *mach = s->MachNodeGenerator( rule, C );
-  mach->_opnds[0] = s->MachOperGenerator( _reduceOp[rule], C );
+  MachNode *mach = s->MachNodeGenerator(rule);
+  mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
   assert( mach->_opnds[0] != NULL, "Missing result operand" );
   Node *leaf = s->_leaf;
   // Check for instruction or instruction chain rule
@@ -1756,13 +1756,13 @@
     assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
             "Bad AD file: Instruction chain rule must chain from operand");
     // Insert operand into array of operands for this instruction
-    mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
+    mach->_opnds[1] = s->MachOperGenerator(opnd_class_instance);
 
     ReduceOper( s, newrule, mem, mach );
   } else {
     // Chain from the result of an instruction
     assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
-    mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
+    mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]);
     Node *mem1 = (Node*)1;
     debug_only(Node *save_mem_node = _mem_node;)
     mach->add_req( ReduceInst(s, newrule, mem1) );
@@ -1807,7 +1807,7 @@
     if( newrule < NUM_OPERANDS ) { // Operand/operandClass or internalOp/instruction?
       // Operand/operandClass
       // Insert operand into array of operands for this instruction
-      mach->_opnds[num_opnds++] = newstate->MachOperGenerator( opnd_class_instance, C );
+      mach->_opnds[num_opnds++] = newstate->MachOperGenerator(opnd_class_instance);
       ReduceOper( newstate, newrule, mem, mach );
 
     } else {                    // Child is internal operand or new instruction
@@ -1818,7 +1818,7 @@
       } else {
         // instruction --> call build operand(  ) to catch result
         //             --> ReduceInst( newrule )
-        mach->_opnds[num_opnds++] = s->MachOperGenerator( _reduceOp[catch_op], C );
+        mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]);
         Node *mem1 = (Node*)1;
         debug_only(Node *save_mem_node = _mem_node;)
         mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
--- a/hotspot/src/share/vm/opto/mathexactnode.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/mathexactnode.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -191,7 +191,7 @@
       NativeType val1 = TypeClass::as_self(type1)->get_con();
       NativeType val2 = TypeClass::as_self(type2)->get_con();
       if (node->will_overflow(val1, val2) == false) {
-        Node* con_result = ConINode::make(phase->C, 0);
+        Node* con_result = ConINode::make(0);
         return con_result;
       }
       return NULL;
--- a/hotspot/src/share/vm/opto/memnode.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -933,12 +933,12 @@
   return (LoadNode*)NULL;
 }
 
-LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
+LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
   bool require_atomic = true;
   return new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic);
 }
 
-LoadDNode* LoadDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
+LoadDNode* LoadDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
   bool require_atomic = true;
   return new LoadDNode(ctl, mem, adr, adr_type, rt, mo, require_atomic);
 }
@@ -1471,6 +1471,7 @@
 
   Node* ctrl    = in(MemNode::Control);
   Node* address = in(MemNode::Address);
+  bool progress = false;
 
   // Skip up past a SafePoint control.  Cannot do this for Stores because
   // pointer stores & cardmarks must stay on the same side of a SafePoint.
@@ -1478,6 +1479,7 @@
       phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) {
     ctrl = ctrl->in(0);
     set_req(MemNode::Control,ctrl);
+    progress = true;
   }
 
   intptr_t ignore = 0;
@@ -1490,6 +1492,7 @@
         && all_controls_dominate(base, phase->C->start())) {
       // A method-invariant, non-null address (constant or 'this' argument).
       set_req(MemNode::Control, NULL);
+      progress = true;
     }
   }
 
@@ -1550,7 +1553,7 @@
     }
   }
 
-  return NULL;                  // No further progress
+  return progress ? this : NULL;
 }
 
 // Helper to recognize certain Klass fields which are invariant across
@@ -2014,7 +2017,6 @@
 //----------------------------LoadKlassNode::make------------------------------
 // Polymorphic factory method:
 Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) {
-  Compile* C = gvn.C;
   Node *ctl = NULL;
   // sanity check the alias category against the created node type
   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
@@ -2379,12 +2381,12 @@
   return (StoreNode*)NULL;
 }
 
-StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
+StoreLNode* StoreLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
   bool require_atomic = true;
   return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
 }
 
-StoreDNode* StoreDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
+StoreDNode* StoreDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
   bool require_atomic = true;
   return new StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
 }
@@ -2460,7 +2462,7 @@
       // and I need to disappear.
       if (moved != NULL) {
         // %%% hack to ensure that Ideal returns a new node:
-        mem = MergeMemNode::make(phase->C, mem);
+        mem = MergeMemNode::make(mem);
         return mem;             // fold me away
       }
     }
@@ -2820,7 +2822,6 @@
                                    intptr_t start_offset,
                                    Node* end_offset,
                                    PhaseGVN* phase) {
-  Compile* C = phase->C;
   intptr_t offset = start_offset;
 
   int unit = BytesPerLong;
@@ -2847,7 +2848,6 @@
     return mem;
   }
 
-  Compile* C = phase->C;
   int unit = BytesPerLong;
   Node* zbase = start_offset;
   Node* zend  = end_offset;
@@ -2875,7 +2875,6 @@
     return mem;
   }
 
-  Compile* C = phase->C;
   assert((end_offset % BytesPerInt) == 0, "odd end offset");
   intptr_t done_offset = end_offset;
   if ((done_offset % BytesPerLong) != 0) {
@@ -2944,6 +2943,7 @@
     return NULL;
   }
 
+  bool progress = false;
   // Eliminate volatile MemBars for scalar replaced objects.
   if (can_reshape && req() == (Precedent+1)) {
     bool eliminate = false;
@@ -2966,6 +2966,7 @@
           phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later
           my_mem = NULL;
         }
+        progress = true;
       }
       if (my_mem != NULL && my_mem->is_Mem()) {
         const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
@@ -2995,7 +2996,7 @@
       return new ConINode(TypeInt::ZERO);
     }
   }
-  return NULL;
+  return progress ? this : NULL;
 }
 
 //------------------------------Value------------------------------------------
@@ -3497,6 +3498,7 @@
   // if it redundantly stored the same value (or zero to fresh memory).
 
   // In any case, wire it in:
+  phase->igvn_rehash_node_delayed(this);
   set_req(i, new_st);
 
   // The caller may now kill the old guy.
@@ -4126,7 +4128,7 @@
 
 // Make a new, untransformed MergeMem with the same base as 'mem'.
 // If mem is itself a MergeMem, populate the result with the same edges.
-MergeMemNode* MergeMemNode::make(Compile* C, Node* mem) {
+MergeMemNode* MergeMemNode::make(Node* mem) {
   return new MergeMemNode(mem);
 }
 
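Several Ideal() hunks above (LoadNode and MemBarNode here, RootNode further down) stop silently editing the node: they track a progress flag and return `this`, so iterative GVN can see that an in-place change happened and re-enqueue the node. A toy sketch of the idiom, with invented node types standing in for HotSpot's:

    #include <vector>

    struct Node {
      std::vector<Node*> _in;   // toy input edges
      bool _dead = false;

      // Contract (as in HotSpot): return a replacement node, return this if
      // the node was changed in place, or return nullptr if nothing changed.
      Node* Ideal() {
        bool progress = false;
        for (size_t i = 0; i < _in.size(); ) {
          if (_in[i] != nullptr && _in[i]->_dead) {
            _in.erase(_in.begin() + i);   // in-place edit ...
            progress = true;              // ... must be reported
          } else {
            ++i;
          }
        }
        return progress ? this : nullptr; // never hide an in-place change
      }
    };

    int main() {
      Node dead;  dead._dead = true;
      Node n;     n._in = { &dead, nullptr };
      return n.Ideal() == &n ? 0 : 1;     // change is reported via 'this'
    }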
--- a/hotspot/src/share/vm/opto/memnode.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -333,7 +333,7 @@
   virtual int store_Opcode() const { return Op_StoreL; }
   virtual BasicType memory_type() const { return T_LONG; }
   bool require_atomic_access() const { return _require_atomic_access; }
-  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
+  static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                 const Type* rt, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
@@ -384,7 +384,7 @@
   virtual int store_Opcode() const { return Op_StoreD; }
   virtual BasicType memory_type() const { return T_DOUBLE; }
   bool require_atomic_access() const { return _require_atomic_access; }
-  static LoadDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
+  static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                 const Type* rt, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
@@ -593,7 +593,7 @@
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_LONG; }
   bool require_atomic_access() const { return _require_atomic_access; }
-  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
+  static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     StoreNode::dump_spec(st);
@@ -629,7 +629,7 @@
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_DOUBLE; }
   bool require_atomic_access() const { return _require_atomic_access; }
-  static StoreDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
+  static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     StoreNode::dump_spec(st);
@@ -1138,7 +1138,7 @@
   // If the input is a whole memory state, clone it with all its slices intact.
   // Otherwise, make a new memory state with just that base memory input.
   // In either case, the result is a newly created MergeMem.
-  static MergeMemNode* make(Compile* C, Node* base_memory);
+  static MergeMemNode* make(Node* base_memory);
 
   virtual int Opcode() const;
   virtual Node *Identity( PhaseTransform *phase );
--- a/hotspot/src/share/vm/opto/movenode.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/movenode.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -88,7 +88,7 @@
     if( in(Condition)->is_Bool() ) {
       BoolNode* b  = in(Condition)->as_Bool();
       BoolNode* b2 = b->negate(phase);
-      return make( phase->C, in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type );
+      return make(in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type);
     }
   }
   return NULL;
@@ -158,7 +158,7 @@
 //------------------------------make-------------------------------------------
 // Make a correctly-flavored CMove.  Since _type is directly determined
 // from the inputs we do not need to specify it here.
-CMoveNode *CMoveNode::make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t ) {
+CMoveNode *CMoveNode::make(Node *c, Node *bol, Node *left, Node *right, const Type *t) {
   switch( t->basic_type() ) {
     case T_INT:     return new CMoveINode( bol, left, right, t->is_int() );
     case T_FLOAT:   return new CMoveFNode( bol, left, right, t );
@@ -196,7 +196,7 @@
     if( in(Condition)->is_Bool() ) {
       BoolNode* b  = in(Condition)->as_Bool();
       BoolNode* b2 = b->negate(phase);
-      return make( phase->C, in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type );
+      return make(in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type);
     }
   }
 
--- a/hotspot/src/share/vm/opto/movenode.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/movenode.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -47,7 +47,7 @@
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   virtual const Type *Value( PhaseTransform *phase ) const;
   virtual Node *Identity( PhaseTransform *phase );
-  static CMoveNode *make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t );
+  static CMoveNode *make(Node *c, Node *bol, Node *left, Node *right, const Type *t);
   // Helper function to spot cmove graph shapes
   static Node *is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f, BoolNode *b );
 };
--- a/hotspot/src/share/vm/opto/node.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/node.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -507,7 +507,7 @@
                                   (const void*)(&mthis->_opnds), 1));
     mach->_opnds = to;
     for ( uint i = 0; i < nopnds; ++i ) {
-      to[i] = from[i]->clone(C);
+      to[i] = from[i]->clone();
     }
   }
   // cloning CallNode may need to clone JVMState
@@ -620,6 +620,7 @@
   *(address*)this = badAddress;  // smash the C++ vtbl, probably
   _in = _out = (Node**) badAddress;
   _max = _cnt = _outmax = _outcnt = 0;
+  compile->remove_modified_node(this);
 #endif
 }
 
@@ -765,6 +766,7 @@
   if (n != NULL) n->del_out((Node *)this);
   _in[idx] = in(--_cnt);  // Compact the array
   _in[_cnt] = NULL;       // NULL out emptied slot
+  Compile::current()->record_modified_node(this);
 }
 
 //------------------------------del_req_ordered--------------------------------
@@ -780,6 +782,7 @@
     Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx-1)*sizeof(Node*)));
   }
   _in[--_cnt] = NULL;   // NULL out emptied slot
+  Compile::current()->record_modified_node(this);
 }
 
 //------------------------------ins_req----------------------------------------
@@ -1297,6 +1300,7 @@
       // Done with outputs.
       igvn->hash_delete(dead);
       igvn->_worklist.remove(dead);
+      igvn->C->remove_modified_node(dead);
       igvn->set_type(dead, Type::TOP);
       if (dead->is_macro()) {
         igvn->C->remove_macro_node(dead);
--- a/hotspot/src/share/vm/opto/node.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/node.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -398,6 +398,7 @@
     if (*p != NULL)  (*p)->del_out((Node *)this);
     (*p) = n;
     if (n != NULL)      n->add_out((Node *)this);
+    Compile::current()->record_modified_node(this);
   }
   // Light version of set_req() to init inputs after node creation.
   void init_req( uint i, Node *n ) {
@@ -409,6 +410,7 @@
     assert( _in[i] == NULL, "sanity");
     _in[i] = n;
     if (n != NULL)      n->add_out((Node *)this);
+    Compile::current()->record_modified_node(this);
   }
   // Find first occurrence of n among my edges:
   int find_edge(Node* n);
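The node.cpp/node.hpp hunks make every edge mutation (set_req, init_req, del_req, del_req_ordered) report the node to the compilation's modified-node list, which the new ASSERT blocks in phaseX.cpp later drain. A sketch of that bookkeeping under the assumption of a plain set; HotSpot uses a Unique_Node_List and compiles this only in debug builds:

    #include <cassert>
    #include <unordered_set>

    struct Node { bool live = true; };

    struct Compile {
      std::unordered_set<Node*> _modified;   // assumed container
      void record_modified_node(Node* n) { _modified.insert(n); }
      void remove_modified_node(Node* n) { _modified.erase(n); }
      // cf. verify_PhaseIterGVN(): every surviving entry is a node whose
      // inputs changed but which IGVN never re-processed.
      void verify_all_reprocessed() const { assert(_modified.empty()); }
    };

    int main() {
      Compile C;
      Node n;
      C.record_modified_node(&n);   // set_req()/del_req() would do this
      C.remove_modified_node(&n);   // transform_old() or node death does this
      C.verify_all_reprocessed();
    }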
--- a/hotspot/src/share/vm/opto/output.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/output.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -526,7 +526,7 @@
 
         if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
           // We've got a winner.  Replace this branch.
-          MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
+          MachNode* replacement = mach->as_MachBranch()->short_branch_version();
 
           // Update the jmp_size.
           int new_size = replacement->size(_regalloc);
@@ -785,9 +785,10 @@
     // grow downwards in all implementations.
     // (If, on some machine, the interpreter's Java locals or stack
     // were to grow upwards, the embedded doubles would be word-swapped.)
-    jint   *dp = (jint*)&d;
-    array->append(new ConstantIntValue(dp[1]));
-    array->append(new ConstantIntValue(dp[0]));
+    jlong_accessor acc;
+    acc.long_value = jlong_cast(d);
+    array->append(new ConstantIntValue(acc.words[1]));
+    array->append(new ConstantIntValue(acc.words[0]));
 #endif
     break;
   }
@@ -804,9 +805,10 @@
     // grow downwards in all implementations.
     // (If, on some machine, the interpreter's Java locals or stack
     // were to grow upwards, the embedded doubles would be word-swapped.)
-    jint *dp = (jint*)&d;
-    array->append(new ConstantIntValue(dp[1]));
-    array->append(new ConstantIntValue(dp[0]));
+    jlong_accessor acc;
+    acc.long_value = d;
+    array->append(new ConstantIntValue(acc.words[1]));
+    array->append(new ConstantIntValue(acc.words[0]));
 #endif
     break;
   }
@@ -1174,7 +1176,7 @@
 
   // fill in the nop array for bundling computations
   MachNode *_nop_list[Bundle::_nop_count];
-  Bundle::initialize_nops(_nop_list, this);
+  Bundle::initialize_nops(_nop_list);
 
   return cb;
 }
@@ -1408,7 +1410,7 @@
 
             if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
               // We've got a winner.  Replace this branch.
-              MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
+              MachNode* replacement = mach->as_MachBranch()->short_branch_version();
 
               // Update the jmp_size.
               int new_size = replacement->size(_regalloc);
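The two output.cpp hunks replace a `jint* dp = (jint*)&d` aliasing cast with a union read. The union's shape (a `long_value` overlaid with a two-`jint` `words` array) is taken from the hunk itself; the real `jlong_accessor` is defined elsewhere in HotSpot's shared headers. A standalone sketch of the same word splitting, with `memcpy` standing in for `jlong_cast`:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    typedef int64_t jlong;
    typedef int32_t jint;

    typedef union {          // shape inferred from the hunk
      jlong long_value;
      jint  words[2];
    } jlong_accessor;

    int main() {
      double d = 2.5;
      jlong bits;
      std::memcpy(&bits, &d, sizeof(bits));  // stands in for jlong_cast(d)

      jlong_accessor acc;
      acc.long_value = bits;
      // Emit words[1] before words[0], exactly as the hunks do; on a
      // little-endian target words[1] is the high half. The surrounding
      // comment in output.cpp explains why: the interpreter's locals and
      // stack grow downward.
      std::printf("0x%08x 0x%08x\n",
                  (unsigned)acc.words[1], (unsigned)acc.words[0]);
      return 0;
    }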
--- a/hotspot/src/share/vm/opto/parse1.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/parse1.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -575,12 +575,13 @@
       decrement_age();
     }
   }
-  if (depth() == 1) {
+
+  if (depth() == 1 && !failing()) {
     // Add check to deoptimize the nmethod if RTM state was changed
     rtm_deopt();
   }
 
-  // Check for bailouts during method entry.
+  // Check for bailouts during method entry or RTM state check setup.
   if (failing()) {
     if (log)  log->done("parse");
     C->set_default_node_notes(caller_nn);
@@ -1756,7 +1757,7 @@
       if (remerge == NULL) {
         assert(base != NULL, "");
         assert(base->in(0) != NULL, "should not be xformed away");
-        remerge = MergeMemNode::make(C, base->in(pnum));
+        remerge = MergeMemNode::make(base->in(pnum));
         gvn().set_type(remerge, Type::MEMORY);
         base->set_req(pnum, remerge);
       }
@@ -2199,7 +2200,7 @@
   // down below a SafePoint.
 
   // Clone the current memory state
-  Node* mem = MergeMemNode::make(C, map()->memory());
+  Node* mem = MergeMemNode::make(map()->memory());
 
   mem = _gvn.transform(mem);
 
@@ -2213,7 +2214,7 @@
 
   // Create a node for the polling address
   if( add_poll_param ) {
-    Node *polladr = ConPNode::make(C, (address)os::get_polling_page());
+    Node *polladr = ConPNode::make((address)os::get_polling_page());
     sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr));
   }
 
--- a/hotspot/src/share/vm/opto/parseHelper.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/parseHelper.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@
 
   // Get method
   const TypePtr* method_type = TypeMetadataPtr::make(method);
-  Node *method_node = _gvn.transform( ConNode::make(C, method_type) );
+  Node *method_node = _gvn.transform(ConNode::make(method_type));
 
   kill_dead_locals();
 
--- a/hotspot/src/share/vm/opto/phaseX.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/phaseX.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -615,7 +615,7 @@
 // Make an idealized constant - one of ConINode, ConPNode, etc.
 ConNode* PhaseValues::uncached_makecon(const Type *t) {
   assert(t->singleton(), "must be a constant");
-  ConNode* x = ConNode::make(C, t);
+  ConNode* x = ConNode::make(t);
   ConNode* k = (ConNode*)hash_find_insert(x); // Value numbering
   if (k == NULL) {
     set_type(x, t);             // Missed, provide type mapping
@@ -933,9 +933,32 @@
   for (int i = 0; i < _verify_window_size; i++) {
     _verify_window[i] = NULL;
   }
+#ifdef ASSERT
+  // Verify that all modified nodes are on _worklist
+  Unique_Node_List* modified_list = C->modified_nodes();
+  while (modified_list != NULL && modified_list->size()) {
+    Node* n = modified_list->pop();
+    if (n->outcnt() != 0 && !n->is_Con() && !_worklist.member(n)) {
+      n->dump();
+      assert(false, "modified node is not on IGVN._worklist");
+    }
+  }
+#endif
 }
 
 void PhaseIterGVN::verify_PhaseIterGVN() {
+#ifdef ASSERT
+  // Verify nodes with changed inputs.
+  Unique_Node_List* modified_list = C->modified_nodes();
+  while (modified_list != NULL && modified_list->size()) {
+    Node* n = modified_list->pop();
+    if (n->outcnt() != 0 && !n->is_Con()) { // skip dead and Con nodes
+      n->dump();
+      assert(false, "modified node was not processed by IGVN.transform_old()");
+    }
+  }
+#endif
+
   C->verify_graph_edges();
   if( VerifyOpto && allow_progress() ) {
     // Must turn off allow_progress to enable assert and break recursion
@@ -964,6 +987,14 @@
                   (int) _verify_counter, (int) _verify_full_passes);
     }
   }
+
+#ifdef ASSERT
+  while (modified_list != NULL && modified_list->size()) {
+    Node* n = modified_list->pop();
+    n->dump();
+    assert(false, "VerifyIterativeGVN: new modified node was added");
+  }
+#endif
 }
 #endif /* PRODUCT */
 
@@ -1066,6 +1097,7 @@
   Node* k = n;
   DEBUG_ONLY(dead_loop_check(k);)
   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
+  C->remove_modified_node(k);
   Node* i = k->Ideal(this, /*can_reshape=*/true);
   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
 #ifndef PRODUCT
@@ -1107,6 +1139,7 @@
     DEBUG_ONLY(dead_loop_check(k);)
     // Try idealizing again
     DEBUG_ONLY(is_new = (k->outcnt() == 0);)
+    C->remove_modified_node(k);
     i = k->Ideal(this, /*can_reshape=*/true);
     assert(i != k || is_new || (i->outcnt() > 0), "don't return dead nodes");
 #ifndef PRODUCT
@@ -1259,6 +1292,7 @@
       _stack.pop();
       // Remove dead node from iterative worklist
       _worklist.remove(dead);
+      C->remove_modified_node(dead);
       // Constant node that has no out-edges and has only one in-edge from
       // root is usually dead. However, sometimes reshaping walk makes
       // it reachable by adding use edges. So, we will NOT count Con nodes
@@ -1288,7 +1322,7 @@
   for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
     Node* use = old->last_out(i);  // for each use...
     // use might need re-hashing (but it won't if it's a new node)
-    bool is_in_table = _table.hash_delete( use );
+    rehash_node_delayed(use);
     // Update use-def info as well
     // We remove all occurrences of old within use->in,
     // so as to avoid rehashing any node more than once.
@@ -1300,11 +1334,6 @@
         ++num_edges;
       }
     }
-    // Insert into GVN hash table if unique
-    // If a duplicate, 'use' will be cleaned up when pulled off worklist
-    if( is_in_table ) {
-      hash_find_insert(use);
-    }
     i -= num_edges;    // we deleted 1 or more copies of this edge
   }
 
@@ -1599,7 +1628,7 @@
     if( t == Type::TOP ) {
       // cache my top node on the Compile instance
       if( C->cached_top_node() == NULL || C->cached_top_node()->in(0) == NULL ) {
-        C->set_cached_top_node( ConNode::make(C, Type::TOP) );
+        C->set_cached_top_node(ConNode::make(Type::TOP));
         set_type(C->top(), Type::TOP);
       }
       nn = C->top();
@@ -1725,7 +1754,7 @@
         MachNode *m = n->as_Mach();
         int deleted_count = 0;
         // check for peephole opportunities
-        MachNode *m2 = m->peephole( block, instruction_index, _regalloc, deleted_count, C );
+        MachNode *m2 = m->peephole(block, instruction_index, _regalloc, deleted_count);
         if( m2 != NULL ) {
 #ifndef PRODUCT
           if( PrintOptoPeephole ) {
--- a/hotspot/src/share/vm/opto/phaseX.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/phaseX.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -311,6 +311,9 @@
                                const Type* limit_type) const
   { ShouldNotCallThis(); return NULL; }
 
+  // Delayed node rehash if this is an IGVN phase
+  virtual void igvn_rehash_node_delayed(Node* n) {}
+
 #ifndef PRODUCT
   void dump_old2new_map() const;
   void dump_new( uint new_lidx ) const;
@@ -488,6 +491,10 @@
     _worklist.push(n);
   }
 
+  void igvn_rehash_node_delayed(Node* n) {
+    rehash_node_delayed(n);
+  }
+
   // Replace ith edge of "n" with "in"
   void replace_input_of(Node* n, int i, Node* in) {
     rehash_node_delayed(n);
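The phaseX.hpp hunks add a no-op virtual igvn_rehash_node_delayed() to the base phase so that code holding only a base-phase pointer, such as the StoreNode::Ideal hunk in memnode.cpp that now calls phase->igvn_rehash_node_delayed(this), can request the IGVN-only delayed rehash without a downcast. A compressed sketch of the hook (class names abbreviated, bodies elided):

    struct Node;

    struct PhaseTransform {                    // base phase: hook is a no-op
      virtual ~PhaseTransform() {}
      virtual void igvn_rehash_node_delayed(Node*) {}
    };

    struct PhaseIterGVN : public PhaseTransform {
      void rehash_node_delayed(Node* n) {
        // hash_delete(n); _worklist.push(n);  // per the phaseX.hpp context
        (void)n;
      }
      virtual void igvn_rehash_node_delayed(Node* n) {
        rehash_node_delayed(n);                // only IGVN does real work
      }
    };

    int main() {
      PhaseIterGVN igvn;
      PhaseTransform* phase = &igvn;           // what StoreNode::Ideal holds
      phase->igvn_rehash_node_delayed(nullptr); // dispatches to the override
      return 0;
    }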
--- a/hotspot/src/share/vm/opto/rootnode.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/rootnode.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -35,10 +35,12 @@
 //------------------------------Ideal------------------------------------------
 // Remove dead inputs
 Node *RootNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  bool modified = false;
   for( uint i = 1; i < req(); i++ ) { // For all inputs
     // Check for and remove dead inputs
     if( phase->type(in(i)) == Type::TOP ) {
       del_req(i--);             // Delete TOP inputs
+      modified = true;
     }
   }
 
@@ -56,7 +58,7 @@
   // If we want to get the rest of the win later, we should pattern match
   // simple recursive call trees to closed-form solutions.
 
-  return NULL;                  // No further opportunities exposed
+  return modified ? this : NULL;
 }
 
 //=============================================================================
--- a/hotspot/src/share/vm/opto/runtime.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1381,11 +1381,11 @@
   }
   NamedCounter* c;
   if (tag == NamedCounter::BiasedLockingCounter) {
-    c = new BiasedLockingNamedCounter(strdup(st.as_string()));
+    c = new BiasedLockingNamedCounter(st.as_string());
   } else if (tag == NamedCounter::RTMLockingCounter) {
-    c = new RTMLockingNamedCounter(strdup(st.as_string()));
+    c = new RTMLockingNamedCounter(st.as_string());
   } else {
-    c = new NamedCounter(strdup(st.as_string()), tag);
+    c = new NamedCounter(st.as_string(), tag);
   }
 
   // atomically add the new counter to the head of the list.  We only
--- a/hotspot/src/share/vm/opto/runtime.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/runtime.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -75,11 +75,17 @@
 
  public:
   NamedCounter(const char *n, CounterTag tag = NoTag):
-    _name(n),
+    _name(n == NULL ? NULL : os::strdup(n)),
     _count(0),
     _next(NULL),
     _tag(tag) {}
 
+  ~NamedCounter() {
+    if (_name != NULL) {
+      os::free((void*)_name);
+    }
+  }
+
   const char * name() const     { return _name; }
   int count() const             { return _count; }
   address addr()                { return (address)&_count; }
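The paired runtime.cpp/runtime.hpp hunks move name ownership into NamedCounter: the constructor duplicates the string and the new destructor frees it, so call sites drop their raw strdup() and stop leaking one copy per counter. A sketch of the ownership contract, with POSIX strdup/free standing in for the os:: wrappers the real code uses:

    #include <cstdlib>
    #include <cstring>

    class NamedCounter {
      const char* _name;
     public:
      explicit NamedCounter(const char* n)
        : _name(n == nullptr ? nullptr : strdup(n)) {} // take a private copy
      ~NamedCounter() { free((void*)_name); }          // free(NULL) is a no-op
      NamedCounter(const NamedCounter&) = delete;      // one owner per name
      NamedCounter& operator=(const NamedCounter&) = delete;
      const char* name() const { return _name; }
    };

    int main() {
      NamedCounter c("lock@foo");          // caller may pass a temporary ...
      return c.name() != nullptr ? 0 : 1;  // ... the counter keeps its own copy
    }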
--- a/hotspot/src/share/vm/opto/stringopts.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/stringopts.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1438,7 +1438,7 @@
   }
   // Make sure the memory state is a MergeMem for parsing.
   if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
-    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
+    map->set_req(TypeFunc::Memory, MergeMemNode::make(map->in(TypeFunc::Memory)));
   }
 
   jvms->set_map(map);
--- a/hotspot/src/share/vm/opto/subnode.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/subnode.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1168,7 +1168,6 @@
 Node* BoolNode::make_predicate(Node* test_value, PhaseGVN* phase) {
   if (test_value->is_Con())   return test_value;
   if (test_value->is_Bool())  return test_value;
-  Compile* C = phase->C;
   if (test_value->is_CMove() &&
       test_value->in(CMoveNode::Condition)->is_Bool()) {
     BoolNode*   bol   = test_value->in(CMoveNode::Condition)->as_Bool();
@@ -1191,7 +1190,7 @@
 //--------------------------------as_int_value---------------------------------
 Node* BoolNode::as_int_value(PhaseGVN* phase) {
   // Inverse to make_predicate.  The CMove probably boils down to a Conv2B.
-  Node* cmov = CMoveNode::make(phase->C, NULL, this,
+  Node* cmov = CMoveNode::make(NULL, this,
                                phase->intcon(0), phase->intcon(1),
                                TypeInt::BOOL);
   return phase->transform(cmov);
@@ -1199,7 +1198,6 @@
 
 //----------------------------------negate-------------------------------------
 BoolNode* BoolNode::negate(PhaseGVN* phase) {
-  Compile* C = phase->C;
   return new BoolNode(in(1), _test.negate());
 }
 
--- a/hotspot/src/share/vm/opto/superword.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/superword.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1378,9 +1378,23 @@
       if (n->is_Load()) {
         Node* ctl = n->in(MemNode::Control);
         Node* mem = first->in(MemNode::Memory);
+        SWPointer p1(n->as_Mem(), this);
+        // Identify the memory dependency for the new loadVector node by
+        // walking up through the memory chain.
+        // This gives the new loadVector node the flexibility to move above
+        // independent storeVector nodes.
+        while (mem->is_StoreVector()) {
+          SWPointer p2(mem->as_Mem(), this);
+          int cmp = p1.cmp(p2);
+          if (SWPointer::not_equal(cmp) || !SWPointer::comparable(cmp)) {
+            mem = mem->in(MemNode::Memory);
+          } else {
+            break; // dependent memory
+          }
+        }
         Node* adr = low_adr->in(MemNode::Address);
         const TypePtr* atyp = n->adr_type();
-        vn = LoadVectorNode::make(C, opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n));
+        vn = LoadVectorNode::make(opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n));
         vlen_in_bytes = vn->as_LoadVector()->memory_size();
       } else if (n->is_Store()) {
         // Promote value to be stored to vector
@@ -1389,7 +1403,7 @@
         Node* mem = first->in(MemNode::Memory);
         Node* adr = low_adr->in(MemNode::Address);
         const TypePtr* atyp = n->adr_type();
-        vn = StoreVectorNode::make(C, opc, ctl, mem, adr, atyp, val, vlen);
+        vn = StoreVectorNode::make(opc, ctl, mem, adr, atyp, val, vlen);
         vlen_in_bytes = vn->as_StoreVector()->memory_size();
       } else if (n->req() == 3) {
         // Promote operands to vector
@@ -1401,7 +1415,7 @@
           in1 = in2;
           in2 = tmp;
         }
-        vn = VectorNode::make(C, opc, in1, in2, vlen, velt_basic_type(n));
+        vn = VectorNode::make(opc, in1, in2, vlen, velt_basic_type(n));
         vlen_in_bytes = vn->as_Vector()->length_in_bytes();
       } else {
         ShouldNotReachHere();
@@ -1450,11 +1464,11 @@
       if (t != NULL && t->is_con()) {
         juint shift = t->get_con();
         if (shift > mask) { // Unsigned cmp
-          cnt = ConNode::make(C, TypeInt::make(shift & mask));
+          cnt = ConNode::make(TypeInt::make(shift & mask));
         }
       } else {
         if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
-          cnt = ConNode::make(C, TypeInt::make(mask));
+          cnt = ConNode::make(TypeInt::make(mask));
           _igvn.register_new_node_with_optimizer(cnt);
           cnt = new AndINode(opd, cnt);
           _igvn.register_new_node_with_optimizer(cnt);
@@ -1462,7 +1476,7 @@
         }
         assert(opd->bottom_type()->isa_int(), "int type only");
         // Move non constant shift count into vector register.
-        cnt = VectorNode::shift_count(C, p0, cnt, vlen, velt_basic_type(p0));
+        cnt = VectorNode::shift_count(p0, cnt, vlen, velt_basic_type(p0));
       }
       if (cnt != opd) {
         _igvn.register_new_node_with_optimizer(cnt);
@@ -1475,7 +1489,7 @@
     // p0's vector. Use p0's type because size of operand's container in
     // vector should match p0's size regardless operand's size.
     const Type* p0_t = velt_type(p0);
-    VectorNode* vn = VectorNode::scalar2vector(_phase->C, opd, vlen, p0_t);
+    VectorNode* vn = VectorNode::scalar2vector(opd, vlen, p0_t);
 
     _igvn.register_new_node_with_optimizer(vn);
     _phase->set_ctrl(vn, _phase->get_ctrl(opd));
@@ -1490,7 +1504,7 @@
 
   // Insert pack operation
   BasicType bt = velt_basic_type(p0);
-  PackNode* pk = PackNode::make(_phase->C, opd, vlen, bt);
+  PackNode* pk = PackNode::make(opd, vlen, bt);
   DEBUG_ONLY( const BasicType opd_bt = opd->bottom_type()->basic_type(); )
 
   for (uint i = 1; i < vlen; i++) {
@@ -1546,7 +1560,7 @@
     _igvn.hash_delete(def);
     int def_pos = alignment(def) / data_size(def);
 
-    Node* ex = ExtractNode::make(_phase->C, def, def_pos, velt_basic_type(def));
+    Node* ex = ExtractNode::make(def, def_pos, velt_basic_type(def));
     _igvn.register_new_node_with_optimizer(ex);
     _phase->set_ctrl(ex, _phase->get_ctrl(def));
     _igvn.replace_input_of(use, idx, ex);
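The superword.cpp hunk above gives a new LoadVector a looser memory dependency: it walks the memory chain upward past every StoreVector the SWPointer comparison proves disjoint, and anchors at the first store it cannot disprove. A toy sketch of the walk, with an integer base standing in for the pointer comparison:

    struct MemNode {
      MemNode* mem_in;         // next node up the memory chain
      bool is_store_vector;
      int  base;               // toy stand-in for SWPointer's address info
    };

    static bool provably_disjoint(const MemNode& a, const MemNode& b) {
      return a.base != b.base; // toy test; the real one is SWPointer::cmp
    }

    static MemNode* hoist_memory(const MemNode& load, MemNode* mem) {
      while (mem->is_store_vector && provably_disjoint(load, *mem)) {
        mem = mem->mem_in;     // safe to float the load above this store
      }
      return mem;              // first possibly dependent node: stop here
    }

    int main() {
      MemNode top  {nullptr, false, 0};
      MemNode st   {&top,    true,  7};   // independent vector store
      MemNode load {&st,     false, 3};
      return hoist_memory(load, load.mem_in) == &top ? 0 : 1;
    }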
--- a/hotspot/src/share/vm/opto/type.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/type.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -265,7 +265,7 @@
   // locking.
 
   Arena* save = current->type_arena();
-  Arena* shared_type_arena = new (mtCompiler)Arena();
+  Arena* shared_type_arena = new (mtCompiler)Arena(mtCompiler);
 
   current->set_type_arena(shared_type_arena);
   _shared_type_dict =
@@ -5087,11 +5087,11 @@
 // Dump Function Type
 #ifndef PRODUCT
 void TypeFunc::dump2( Dict &d, uint depth, outputStream *st ) const {
-  if( _range->_cnt <= Parms )
+  if( _range->cnt() <= Parms )
     st->print("void");
   else {
     uint i;
-    for (i = Parms; i < _range->_cnt-1; i++) {
+    for (i = Parms; i < _range->cnt()-1; i++) {
       _range->field_at(i)->dump2(d,depth,st);
       st->print("/");
     }
@@ -5104,9 +5104,9 @@
     return;
   }
   d.Insert((void*)this,(void*)this);    // Stop recursion
-  if (Parms < _domain->_cnt)
+  if (Parms < _domain->cnt())
     _domain->field_at(Parms)->dump2(d,depth-1,st);
-  for (uint i = Parms+1; i < _domain->_cnt; i++) {
+  for (uint i = Parms+1; i < _domain->cnt(); i++) {
     st->print(", ");
     _domain->field_at(i)->dump2(d,depth-1,st);
   }
--- a/hotspot/src/share/vm/opto/type.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/type.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -609,16 +609,16 @@
 // signature types.
 class TypeTuple : public Type {
   TypeTuple( uint cnt, const Type **fields ) : Type(Tuple), _cnt(cnt), _fields(fields) { }
+
+  const uint          _cnt;              // Count of fields
+  const Type ** const _fields;           // Array of field types
+
 public:
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
   virtual bool singleton(void) const;    // TRUE if type is a singleton
   virtual bool empty(void) const;        // TRUE if type is vacuous
 
-public:
-  const uint          _cnt;              // Count of fields
-  const Type ** const _fields;           // Array of field types
-
   // Accessors:
   uint cnt() const { return _cnt; }
   const Type* field_at(uint i) const {
@@ -1447,6 +1447,10 @@
   virtual int  hash() const;             // Type specific hashing
   virtual bool singleton(void) const;    // TRUE if type is a singleton
   virtual bool empty(void) const;        // TRUE if type is vacuous
+
+  const TypeTuple* const _domain;     // Domain of inputs
+  const TypeTuple* const _range;      // Range of results
+
 public:
   // Constants are shared among ADLC and VM
   enum { Control    = AdlcVMDeps::Control,
@@ -1457,8 +1461,6 @@
          Parms      = AdlcVMDeps::Parms
   };
 
-  const TypeTuple* const _domain;     // Domain of inputs
-  const TypeTuple* const _range;      // Range of results
 
   // Accessors:
   const TypeTuple* domain() const { return _domain; }
--- a/hotspot/src/share/vm/opto/vectornode.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/vectornode.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -245,7 +245,7 @@
 }
 
 // Return the vector version of a scalar operation node.
-VectorNode* VectorNode::make(Compile* C, int opc, Node* n1, Node* n2, uint vlen, BasicType bt) {
+VectorNode* VectorNode::make(int opc, Node* n1, Node* n2, uint vlen, BasicType bt) {
   const TypeVect* vt = TypeVect::make(bt, vlen);
   int vopc = VectorNode::opcode(opc, bt);
   // This method should not be called for unimplemented vectors.
@@ -299,7 +299,7 @@
 }
 
 // Scalar promotion
-VectorNode* VectorNode::scalar2vector(Compile* C, Node* s, uint vlen, const Type* opd_t) {
+VectorNode* VectorNode::scalar2vector(Node* s, uint vlen, const Type* opd_t) {
   BasicType bt = opd_t->array_element_basic_type();
   const TypeVect* vt = opd_t->singleton() ? TypeVect::make(opd_t, vlen)
                                           : TypeVect::make(bt, vlen);
@@ -323,7 +323,7 @@
   return NULL;
 }
 
-VectorNode* VectorNode::shift_count(Compile* C, Node* shift, Node* cnt, uint vlen, BasicType bt) {
+VectorNode* VectorNode::shift_count(Node* shift, Node* cnt, uint vlen, BasicType bt) {
   assert(VectorNode::is_shift(shift) && !cnt->is_Con(), "only variable shift count");
   // Match shift count type with shift vector type.
   const TypeVect* vt = TypeVect::make(bt, vlen);
@@ -342,7 +342,7 @@
 }
 
 // Return initial Pack node. Additional operands added with add_opd() calls.
-PackNode* PackNode::make(Compile* C, Node* s, uint vlen, BasicType bt) {
+PackNode* PackNode::make(Node* s, uint vlen, BasicType bt) {
   const TypeVect* vt = TypeVect::make(bt, vlen);
   switch (bt) {
   case T_BOOLEAN:
@@ -365,18 +365,18 @@
 }
 
 // Create a binary tree form for Packs. [lo, hi) (half-open) range
-PackNode* PackNode::binary_tree_pack(Compile* C, int lo, int hi) {
+PackNode* PackNode::binary_tree_pack(int lo, int hi) {
   int ct = hi - lo;
   assert(is_power_of_2(ct), "power of 2");
   if (ct == 2) {
-    PackNode* pk = PackNode::make(C, in(lo), 2, vect_type()->element_basic_type());
+    PackNode* pk = PackNode::make(in(lo), 2, vect_type()->element_basic_type());
     pk->add_opd(in(lo+1));
     return pk;
 
   } else {
     int mid = lo + ct/2;
-    PackNode* n1 = binary_tree_pack(C, lo,  mid);
-    PackNode* n2 = binary_tree_pack(C, mid, hi );
+    PackNode* n1 = binary_tree_pack(lo,  mid);
+    PackNode* n2 = binary_tree_pack(mid, hi );
 
     BasicType bt = n1->vect_type()->element_basic_type();
     assert(bt == n2->vect_type()->element_basic_type(), "should be the same");
@@ -402,23 +402,23 @@
 }
 
 // Return the vector version of a scalar load node.
-LoadVectorNode* LoadVectorNode::make(Compile* C, int opc, Node* ctl, Node* mem,
+LoadVectorNode* LoadVectorNode::make(int opc, Node* ctl, Node* mem,
                                      Node* adr, const TypePtr* atyp, uint vlen, BasicType bt) {
   const TypeVect* vt = TypeVect::make(bt, vlen);
   return new LoadVectorNode(ctl, mem, adr, atyp, vt);
 }
 
 // Return the vector version of a scalar store node.
-StoreVectorNode* StoreVectorNode::make(Compile* C, int opc, Node* ctl, Node* mem,
+StoreVectorNode* StoreVectorNode::make(int opc, Node* ctl, Node* mem,
                                        Node* adr, const TypePtr* atyp, Node* val,
                                        uint vlen) {
   return new StoreVectorNode(ctl, mem, adr, atyp, val);
 }
 
 // Extract a scalar element of vector.
-Node* ExtractNode::make(Compile* C, Node* v, uint position, BasicType bt) {
+Node* ExtractNode::make(Node* v, uint position, BasicType bt) {
   assert((int)position < Matcher::max_vector_size(bt), "pos in range");
-  ConINode* pos = ConINode::make(C, (int)position);
+  ConINode* pos = ConINode::make((int)position);
   switch (bt) {
   case T_BOOLEAN:
     return new ExtractUBNode(v, pos);
--- a/hotspot/src/share/vm/opto/vectornode.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/opto/vectornode.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,9 +52,9 @@
 
   virtual uint ideal_reg() const { return Matcher::vector_ideal_reg(vect_type()->length_in_bytes()); }
 
-  static VectorNode* scalar2vector(Compile* C, Node* s, uint vlen, const Type* opd_t);
-  static VectorNode* shift_count(Compile* C, Node* shift, Node* cnt, uint vlen, BasicType bt);
-  static VectorNode* make(Compile* C, int opc, Node* n1, Node* n2, uint vlen, BasicType bt);
+  static VectorNode* scalar2vector(Node* s, uint vlen, const Type* opd_t);
+  static VectorNode* shift_count(Node* shift, Node* cnt, uint vlen, BasicType bt);
+  static VectorNode* make(int opc, Node* n1, Node* n2, uint vlen, BasicType bt);
 
   static int  opcode(int opc, BasicType bt);
   static bool implemented(int opc, uint vlen, BasicType bt);
@@ -371,7 +371,7 @@
 
   virtual int store_Opcode() const { return Op_StoreVector; }
 
-  static LoadVectorNode* make(Compile* C, int opc, Node* ctl, Node* mem,
+  static LoadVectorNode* make(int opc, Node* ctl, Node* mem,
                               Node* adr, const TypePtr* atyp, uint vlen, BasicType bt);
 };
 
@@ -394,7 +394,7 @@
   virtual BasicType memory_type() const { return T_VOID; }
   virtual int memory_size() const { return vect_type()->length_in_bytes(); }
 
-  static StoreVectorNode* make(Compile* C, int opc, Node* ctl, Node* mem,
+  static StoreVectorNode* make(int opc, Node* ctl, Node* mem,
                                Node* adr, const TypePtr* atyp, Node* val,
                                uint vlen);
 };
@@ -465,9 +465,9 @@
   }
 
   // Create a binary tree form for Packs. [lo, hi) (half-open) range
-  PackNode* binary_tree_pack(Compile* C, int lo, int hi);
+  PackNode* binary_tree_pack(int lo, int hi);
 
-  static PackNode* make(Compile* C, Node* s, uint vlen, BasicType bt);
+  static PackNode* make(Node* s, uint vlen, BasicType bt);
 };
 
 //------------------------------PackBNode--------------------------------------
@@ -552,7 +552,7 @@
   virtual int Opcode() const;
   uint  pos() const { return in(2)->get_int(); }
 
-  static Node* make(Compile* C, Node* v, uint position, BasicType bt);
+  static Node* make(Node* v, uint position, BasicType bt);
 };
 
 //------------------------------ExtractBNode-----------------------------------
--- a/hotspot/src/share/vm/precompiled/precompiled.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/precompiled/precompiled.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,10 +222,17 @@
 # include "runtime/vmThread.hpp"
 # include "runtime/vm_operations.hpp"
 # include "runtime/vm_version.hpp"
+# include "services/allocationSite.hpp"
 # include "services/lowMemoryDetector.hpp"
+# include "services/mallocTracker.hpp"
+# include "services/memBaseline.hpp"
 # include "services/memoryPool.hpp"
 # include "services/memoryService.hpp"
 # include "services/memoryUsage.hpp"
+# include "services/memReporter.hpp"
+# include "services/memTracker.hpp"
+# include "services/nmtCommon.hpp"
+# include "services/virtualMemoryTracker.hpp"
 # include "utilities/accessFlags.hpp"
 # include "utilities/array.hpp"
 # include "utilities/bitMap.hpp"
@@ -240,6 +247,7 @@
 # include "utilities/hashtable.hpp"
 # include "utilities/histogram.hpp"
 # include "utilities/macros.hpp"
+# include "utilities/nativeCallStack.hpp"
 # include "utilities/numberSeq.hpp"
 # include "utilities/ostream.hpp"
 # include "utilities/preserveException.hpp"
--- a/hotspot/src/share/vm/prims/jni.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -74,6 +74,7 @@
 #include "runtime/signature.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vm_operations.hpp"
+#include "services/memTracker.hpp"
 #include "services/runtimeService.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/defaultStream.hpp"
@@ -2697,6 +2698,7 @@
     if (bad_address != NULL) {
       os::protect_memory(bad_address, size, os::MEM_PROT_READ,
                          /*is_committed*/false);
+      MemTracker::record_virtual_memory_type((void*)bad_address, mtInternal);
     }
   }
   return bad_address;
@@ -3857,6 +3859,7 @@
 void TestKlass_test();
 void TestBitMap_test();
 void TestAsUtf8();
+void Test_linked_list();
 #if INCLUDE_ALL_GCS
 void TestOldFreeSpaceCalculation_test();
 void TestG1BiasedArray_test();
@@ -3887,6 +3890,7 @@
     run_unit_test(TestBitMap_test());
     run_unit_test(TestAsUtf8());
     run_unit_test(ObjectMonitor::sanity_checks());
+    run_unit_test(Test_linked_list());
 #if INCLUDE_VM_STRUCTS
     run_unit_test(VMStructs::test());
 #endif
--- a/hotspot/src/share/vm/prims/jvmtiManageCapabilities.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiManageCapabilities.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -316,6 +316,7 @@
     avail.can_generate_frame_pop_events ||
     avail.can_generate_method_entry_events ||
     avail.can_generate_method_exit_events;
+#ifdef ZERO
   bool enter_all_methods =
     interp_events ||
     avail.can_generate_breakpoint_events;
@@ -324,6 +325,7 @@
     UseFastEmptyMethods = false;
     UseFastAccessorMethods = false;
   }
+#endif // ZERO
 
   if (avail.can_generate_breakpoint_events) {
     RewriteFrequentPairs = false;
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -52,8 +52,10 @@
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #endif // INCLUDE_ALL_GCS
 
-#ifdef INCLUDE_NMT
+#if INCLUDE_NMT
+#include "services/mallocSiteTable.hpp"
 #include "services/memTracker.hpp"
+#include "utilities/nativeCallStack.hpp"
 #endif // INCLUDE_NMT
 
 #include "compiler/compileBroker.hpp"
@@ -255,12 +257,16 @@
 // NMT picks it up correctly
 WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
   jlong addr = 0;
+  addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
+  return addr;
+WB_END
 
-  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
-    addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
-  }
-
-  return addr;
+// Allocate memory with a pseudo call stack. The test can create pseudo malloc
+// allocation sites to stress the malloc tracking.
+WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, jint pseudo_stack))
+  address pc = (address)(size_t)pseudo_stack;
+  NativeCallStack stack(&pc, 1);
+  return (jlong)os::malloc(size, mtTest, stack);
 WB_END
 
 // Free the memory allocated by NMTAllocTest
@@ -271,10 +277,8 @@
 WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
   jlong addr = 0;
 
-  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
     addr = (jlong)(uintptr_t)os::reserve_memory(size);
     MemTracker::record_virtual_memory_type((address)addr, mtTest);
-  }
 
   return addr;
 WB_END
@@ -293,20 +297,20 @@
   os::release_memory((char *)(uintptr_t)addr, size);
 WB_END
 
-// Block until the current generation of NMT data to be merged, used to reliably test the NMT feature
-WB_ENTRY(jboolean, WB_NMTWaitForDataMerge(JNIEnv* env))
-
-  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
-    return false;
-  }
-
-  return MemTracker::wbtest_wait_for_data_merge();
+WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
+  return MemTracker::tracking_level() == NMT_detail;
 WB_END
 
-WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
-  return MemTracker::tracking_level() == MemTracker::NMT_detail;
+WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num))
+  address pc = (address)1;
+  for (jlong index = 0; index < num; index ++) {
+    NativeCallStack stack(&pc, 1);
+    os::malloc(0, mtTest, stack);
+    pc += MallocSiteTable::hash_buckets();
+  }
 WB_END
 
+
 #endif // INCLUDE_NMT
 
 static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@@ -597,6 +601,15 @@
   return NULL;
 WB_END
 
+WB_ENTRY(jobject, WB_GetSizeTVMFlag(JNIEnv* env, jobject o, jstring name))
+  size_t result;
+  if (GetVMFlag <size_t> (thread, env, name, &result, &CommandLineFlags::size_tAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    return longBox(thread, env, result);
+  }
+  return NULL;
+WB_END
+
 WB_ENTRY(jobject, WB_GetDoubleVMFlag(JNIEnv* env, jobject o, jstring name))
   double result;
   if (GetVMFlag <double> (thread, env, name, &result, &CommandLineFlags::doubleAt)) {
@@ -637,6 +650,11 @@
   SetVMFlag <uint64_t> (thread, env, name, &result, &CommandLineFlags::uint64_tAtPut);
 WB_END
 
+WB_ENTRY(void, WB_SetSizeTVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
+  size_t result = value;
+  SetVMFlag <size_t> (thread, env, name, &result, &CommandLineFlags::size_tAtPut);
+WB_END
+
 WB_ENTRY(void, WB_SetDoubleVMFlag(JNIEnv* env, jobject o, jstring name, jdouble value))
   double result = value;
   SetVMFlag <double> (thread, env, name, &result, &CommandLineFlags::doubleAtPut);
@@ -843,12 +861,13 @@
 #endif // INCLUDE_ALL_GCS
 #if INCLUDE_NMT
   {CC"NMTMalloc",           CC"(J)J",                 (void*)&WB_NMTMalloc          },
+  {CC"NMTMallocWithPseudoStack", CC"(JI)J",           (void*)&WB_NMTMallocWithPseudoStack},
   {CC"NMTFree",             CC"(J)V",                 (void*)&WB_NMTFree            },
   {CC"NMTReserveMemory",    CC"(J)J",                 (void*)&WB_NMTReserveMemory   },
   {CC"NMTCommitMemory",     CC"(JJ)V",                (void*)&WB_NMTCommitMemory    },
   {CC"NMTUncommitMemory",   CC"(JJ)V",                (void*)&WB_NMTUncommitMemory  },
   {CC"NMTReleaseMemory",    CC"(JJ)V",                (void*)&WB_NMTReleaseMemory   },
-  {CC"NMTWaitForDataMerge", CC"()Z",                  (void*)&WB_NMTWaitForDataMerge},
+  {CC"NMTOverflowHashBucket", CC"(J)V",               (void*)&WB_NMTOverflowHashBucket},
   {CC"NMTIsDetailSupported",CC"()Z",                  (void*)&WB_NMTIsDetailSupported},
 #endif // INCLUDE_NMT
   {CC"deoptimizeAll",      CC"()V",                   (void*)&WB_DeoptimizeAll     },
@@ -880,6 +899,7 @@
   {CC"setIntxVMFlag",      CC"(Ljava/lang/String;J)V",(void*)&WB_SetIntxVMFlag},
   {CC"setUintxVMFlag",     CC"(Ljava/lang/String;J)V",(void*)&WB_SetUintxVMFlag},
   {CC"setUint64VMFlag",    CC"(Ljava/lang/String;J)V",(void*)&WB_SetUint64VMFlag},
+  {CC"setSizeTVMFlag",     CC"(Ljava/lang/String;J)V",(void*)&WB_SetSizeTVMFlag},
   {CC"setDoubleVMFlag",    CC"(Ljava/lang/String;D)V",(void*)&WB_SetDoubleVMFlag},
   {CC"setStringVMFlag",    CC"(Ljava/lang/String;Ljava/lang/String;)V",
                                                       (void*)&WB_SetStringVMFlag},
@@ -891,6 +911,8 @@
                                                       (void*)&WB_GetUintxVMFlag},
   {CC"getUint64VMFlag",    CC"(Ljava/lang/String;)Ljava/lang/Long;",
                                                       (void*)&WB_GetUint64VMFlag},
+  {CC"getSizeTVMFlag",     CC"(Ljava/lang/String;)Ljava/lang/Long;",
+                                                      (void*)&WB_GetSizeTVMFlag},
   {CC"getDoubleVMFlag",    CC"(Ljava/lang/String;)Ljava/lang/Double;",
                                                       (void*)&WB_GetDoubleVMFlag},
   {CC"getStringVMFlag",    CC"(Ljava/lang/String;)Ljava/lang/String;",
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -300,6 +300,11 @@
   { "UseNewReflection",              JDK_Version::jdk(9), JDK_Version::jdk(10) },
   { "ReflectionWrapResolutionErrors",JDK_Version::jdk(9), JDK_Version::jdk(10) },
   { "VerifyReflectionBytecodes",     JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "AutoShutdownNMT",               JDK_Version::jdk(9), JDK_Version::jdk(10) },
+#ifndef ZERO
+  { "UseFastAccessorMethods",        JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "UseFastEmptyMethods",           JDK_Version::jdk(9), JDK_Version::jdk(10) },
+#endif // ZERO
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
 
@@ -686,6 +691,10 @@
   if (!is_neg && CommandLineFlags::uint64_tAtPut(name, &uint64_t_v, origin)) {
     return true;
   }
+  size_t size_t_v = (size_t) v;
+  if (!is_neg && CommandLineFlags::size_tAtPut(name, &size_t_v, origin)) {
+    return true;
+  }
   return false;
 }
 
@@ -799,7 +808,7 @@
   } else {
     *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, new_count, mtInternal);
   }
-  (*bldarray)[*count] = strdup(arg);
+  (*bldarray)[*count] = os::strdup_check_oom(arg);
   *count = new_count;
 }
 
@@ -1070,16 +1079,6 @@
   UseCompiler                = true;
   UseLoopCounter             = true;
 
-#ifndef ZERO
-  // Turn these off for mixed and comp.  Leave them on for Zero.
-  if (FLAG_IS_DEFAULT(UseFastAccessorMethods)) {
-    UseFastAccessorMethods = (mode == _int);
-  }
-  if (FLAG_IS_DEFAULT(UseFastEmptyMethods)) {
-    UseFastEmptyMethods = (mode == _int);
-  }
-#endif
-
   // Default values may be platform/compiler dependent -
   // use the saved values
   ClipInlining               = Arguments::_ClipInlining;
@@ -1431,6 +1430,22 @@
                 (int)ObjectAlignmentInBytes, os::vm_page_size());
     return false;
   }
+  if(SurvivorAlignmentInBytes == 0) {
+    SurvivorAlignmentInBytes = ObjectAlignmentInBytes;
+  } else {
+    if (!is_power_of_2(SurvivorAlignmentInBytes)) {
+      jio_fprintf(defaultStream::error_stream(),
+            "error: SurvivorAlignmentInBytes=%d must be power of 2\n",
+            (int)SurvivorAlignmentInBytes);
+      return false;
+    }
+    if (SurvivorAlignmentInBytes < ObjectAlignmentInBytes) {
+      jio_fprintf(defaultStream::error_stream(),
+          "error: SurvivorAlignmentInBytes=%d must be greater than ObjectAlignmentInBytes=%d \n",
+          (int)SurvivorAlignmentInBytes, (int)ObjectAlignmentInBytes);
+      return false;
+    }
+  }
   return true;
 }
 
@@ -1869,7 +1884,7 @@
 }
 
 void Arguments::process_java_launcher_argument(const char* launcher, void* extra_info) {
-  _sun_java_launcher = strdup(launcher);
+  _sun_java_launcher = os::strdup_check_oom(launcher);
 }
 
 bool Arguments::created_by_java_launcher() {
@@ -2372,7 +2387,7 @@
 
   if (PrintNMTStatistics) {
 #if INCLUDE_NMT
-    if (MemTracker::tracking_level() == MemTracker::NMT_off) {
+    if (MemTracker::tracking_level() == NMT_off) {
 #endif // INCLUDE_NMT
       warning("PrintNMTStatistics is disabled, because native memory tracking is not enabled");
       PrintNMTStatistics = false;
@@ -2979,7 +2994,7 @@
       // Redirect GC output to the file. -Xloggc:<filename>
       // ostream_init_log(), when called will use this filename
       // to initialize a fileStream.
-      _gc_log_filename = strdup(tail);
+      _gc_log_filename = os::strdup_check_oom(tail);
      if (!is_filename_valid(_gc_log_filename)) {
        jio_fprintf(defaultStream::output_stream(),
                   "Invalid file name for use with -Xloggc: Filename can only contain the "
@@ -3582,15 +3597,24 @@
       CommandLineFlags::printFlags(tty, false);
       vm_exit(0);
     }
+#if INCLUDE_NMT
     if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
-#if INCLUDE_NMT
-      MemTracker::init_tracking_options(tail);
-#else
-      jio_fprintf(defaultStream::error_stream(),
-        "Native Memory Tracking is not supported in this VM\n");
-      return JNI_ERR;
+      // The launcher did not set up the NMT environment variable properly.
+//      if (!MemTracker::check_launcher_nmt_support(tail)) {
+//        warning("Native Memory Tracking was not set up properly, using wrong launcher?");
+//      }
+
+      // Verify that the NMT option is valid.
+      if (MemTracker::verify_nmt_option()) {
+        // Late initialization, still in single-threaded mode.
+        if (MemTracker::tracking_level() >= NMT_summary) {
+          MemTracker::init();
+        }
+      } else {
+        vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
+      }
+    }
 #endif
-    }
 
 
 #ifndef PRODUCT
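
The numeric-flag hunk in arguments.cpp above (size_tAtPut added after uint64_tAtPut) extends a try-each-type chain: a parsed numeric value is offered to each typed flag table in turn until one accepts the flag under its actual type. A minimal standalone sketch of that dispatch, not HotSpot code; the *_sketch names and the single hard-coded flag per type are illustrative assumptions:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Stand-ins for the typed flag tables; each accepts only flags of its own type.
    static bool uint64_tAtPut_sketch(const char* name, uint64_t v) {
      return strcmp(name, "SomeUint64Flag") == 0;            // pretend uint64_t flag
    }
    static bool size_tAtPut_sketch(const char* name, size_t v) {
      return strcmp(name, "ArrayAllocatorMallocLimit") == 0; // size_t flag from this change
    }

    static bool set_numeric_flag_sketch(const char* name, long long v, bool is_neg) {
      if (!is_neg && uint64_tAtPut_sketch(name, (uint64_t)v)) return true;
      if (!is_neg && size_tAtPut_sketch(name, (size_t)v))     return true; // new size_t case
      return false;
    }

    int main() {
      printf("%d\n", set_numeric_flag_sketch("ArrayAllocatorMallocLimit", 64 * 1024, false)); // 1
      return 0;
    }
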
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -661,7 +661,7 @@
              (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                     top_frame_expression_stack_adjustment))) ||
             (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
-            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute) &&
+            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
              (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
             )) {
         ttyLocker ttyl;
--- a/hotspot/src/share/vm/runtime/fprofiler.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/fprofiler.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -629,10 +629,16 @@
   }
 
   vmNode(const char* name, const TickPosition where) : ProfilerNode() {
-    _name = name;
+    _name = os::strdup(name);
     update(where);
   }
 
+  ~vmNode() {
+    if (_name != NULL) {
+      os::free((void*)_name);
+    }
+  }
+
   const char *name()    const { return _name; }
   bool is_compiled()    const { return true; }
 
@@ -784,7 +790,7 @@
   assert(index >= 0, "Must be positive");
   // Note that we call strdup below since the symbol may be resource allocated
   if (!table[index]) {
-    table[index] = new (this) vmNode(os::strdup(name), where);
+    table[index] = new (this) vmNode(name, where);
   } else {
     ProfilerNode* prev = table[index];
     for(ProfilerNode* node = prev; node; node = node->next()) {
@@ -794,7 +800,7 @@
       }
       prev = node;
     }
-    prev->set_next(new (this) vmNode(os::strdup(name), where));
+    prev->set_next(new (this) vmNode(name, where));
   }
 }
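
The fprofiler change above moves string ownership into the node: the constructor takes its own copy of the name and the new destructor frees it, so call sites no longer strdup at every construction point. A minimal sketch of that owning-copy pattern, with plain strdup/free standing in for the os:: wrappers:

    #include <cstdlib>
    #include <cstring>

    class NameNodeSketch {
      const char* _name;
     public:
      explicit NameNodeSketch(const char* name) : _name(strdup(name)) {} // own a private copy
      ~NameNodeSketch() { free((void*)_name); }                          // single release point
      const char* name() const { return _name; }
    };

    int main() {
      NameNodeSketch node("resource-allocated symbol"); // safe even if the source string dies
      return node.name() == nullptr;                    // 0 on success
    }
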
 
--- a/hotspot/src/share/vm/runtime/frame.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -407,7 +407,8 @@
 
 address frame::interpreter_frame_bcp() const {
   assert(is_interpreted_frame(), "interpreted frame expected");
-  return (address)*interpreter_frame_bcp_addr();
+  address bcp = (address)*interpreter_frame_bcp_addr();
+  return interpreter_frame_method()->bcp_from(bcp);
 }
 
 void frame::interpreter_frame_set_bcp(address bcp) {
--- a/hotspot/src/share/vm/runtime/globals.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -131,6 +131,19 @@
   *((uint64_t*) _addr) = value;
 }
 
+bool Flag::is_size_t() const {
+  return strcmp(_type, "size_t") == 0;
+}
+
+size_t Flag::get_size_t() const {
+  return *((size_t*) _addr);
+}
+
+void Flag::set_size_t(size_t value) {
+  check_writable();
+  *((size_t*) _addr) = value;
+}
+
 bool Flag::is_double() const {
   return strcmp(_type, "double") == 0;
 }
@@ -306,6 +319,9 @@
   if (is_uint64_t()) {
     st->print("%-16lu", get_uint64_t());
   }
+  if (is_size_t()) {
+    st->print(SIZE_FORMAT_W(-16), get_size_t());
+  }
   if (is_double()) {
     st->print("%-16f", get_double());
   }
@@ -395,6 +411,8 @@
     st->print("-XX:%s=" UINTX_FORMAT, _name, get_uintx());
   } else if (is_uint64_t()) {
     st->print("-XX:%s=" UINT64_FORMAT, _name, get_uint64_t());
+  } else if (is_size_t()) {
+    st->print("-XX:%s=" SIZE_FORMAT, _name, get_size_t());
   } else if (is_double()) {
     st->print("-XX:%s=%f", _name, get_double());
   } else if (is_ccstr()) {
@@ -723,6 +741,34 @@
   faddr->set_origin(origin);
 }
 
+bool CommandLineFlags::size_tAt(const char* name, size_t len, size_t* value) {
+  Flag* result = Flag::find_flag(name, len);
+  if (result == NULL) return false;
+  if (!result->is_size_t()) return false;
+  *value = result->get_size_t();
+  return true;
+}
+
+bool CommandLineFlags::size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin) {
+  Flag* result = Flag::find_flag(name, len);
+  if (result == NULL) return false;
+  if (!result->is_size_t()) return false;
+  size_t old_value = result->get_size_t();
+  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
+  result->set_size_t(*value);
+  *value = old_value;
+  result->set_origin(origin);
+  return true;
+}
+
+void CommandLineFlagsEx::size_tAtPut(CommandLineFlagWithType flag, size_t value, Flag::Flags origin) {
+  Flag* faddr = address_of_flag(flag);
+  guarantee(faddr != NULL && faddr->is_size_t(), "wrong flag type");
+  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(faddr->_name, faddr->get_size_t(), value, origin);
+  faddr->set_size_t(value);
+  faddr->set_origin(origin);
+}
+
 bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
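
size_tAtPut above has an in/out contract worth noting: *value carries the new setting in and the previous setting back out through the same pointer. A standalone mimic of just that contract; g_flag stands in for a size_t VM flag and is an assumption for illustration:

    #include <cstddef>
    #include <cstdio>

    static size_t g_flag = 64 * 1024;          // stands in for a size_t VM flag

    static bool size_tAtPut_sketch(size_t* value) {
      size_t old_value = g_flag;               // remember the previous setting
      g_flag = *value;                         // install the new one
      *value = old_value;                      // hand the old value back to the caller
      return true;
    }

    int main() {
      size_t v = 128 * 1024;
      size_tAtPut_sketch(&v);
      printf("old=%zu new=%zu\n", v, g_flag);  // old=65536 new=131072
      return 0;
    }
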
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -275,6 +275,10 @@
   uint64_t get_uint64_t() const;
   void set_uint64_t(uint64_t value);
 
+  bool is_size_t() const;
+  size_t get_size_t() const;
+  void set_size_t(size_t value);
+
   bool is_double() const;
   double get_double() const;
   void set_double(double value);
@@ -350,7 +354,6 @@
   ~UIntFlagSetting()                         { *flag = val; }
 };
 
-
 class DoubleFlagSetting {
   double val;
   double* flag;
@@ -359,6 +362,14 @@
   ~DoubleFlagSetting()                           { *flag = val; }
 };
 
+class SizeTFlagSetting {
+  size_t val;
+  size_t* flag;
+ public:
+  SizeTFlagSetting(size_t& fl, size_t newValue) { flag = &fl; val = fl; fl = newValue; }
+  ~SizeTFlagSetting()                           { *flag = val; }
+};
+
 
 class CommandLineFlags {
  public:
@@ -377,6 +388,11 @@
   static bool uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin);
   static bool uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
 
+  static bool size_tAt(const char* name, size_t len, size_t* value);
+  static bool size_tAt(const char* name, size_t* value)    { return size_tAt(name, strlen(name), value); }
+  static bool size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin);
+  static bool size_tAtPut(const char* name, size_t* value, Flag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); }
+
   static bool uint64_tAt(const char* name, size_t len, uint64_t* value);
   static bool uint64_tAt(const char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); }
   static bool uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin);
@@ -945,11 +961,6 @@
   diagnostic(bool, PrintNMTStatistics, false,                               \
           "Print native memory tracking summary data if it is on")          \
                                                                             \
-  diagnostic(bool, AutoShutdownNMT, true,                                   \
-          "Automatically shutdown native memory tracking under stress "     \
-          "situations. When set to false, native memory tracking tries to " \
-          "stay alive at the expense of JVM performance")                   \
-                                                                            \
   diagnostic(bool, LogCompilation, false,                                   \
           "Log compilation activity in detail to LogFile")                  \
                                                                             \
@@ -1078,6 +1089,9 @@
   product(bool, ClassUnloading, true,                                       \
           "Do unloading of classes")                                        \
                                                                             \
+  product(bool, ClassUnloadingWithConcurrentMark, true,                     \
+          "Do unloading of classes with a concurrent marking cycle")        \
+                                                                            \
   develop(bool, DisableStartThread, false,                                  \
           "Disable starting of additional Java threads "                    \
           "(for debugging only)")                                           \
@@ -2786,12 +2800,6 @@
   product(bool, UseLoopCounter, true,                                       \
           "Increment invocation counter on backward branch")                \
                                                                             \
-  product(bool, UseFastEmptyMethods, true,                                  \
-          "Use fast method entry code for empty methods")                   \
-                                                                            \
-  product(bool, UseFastAccessorMethods, true,                               \
-          "Use fast method entry code for accessor methods")                \
-                                                                            \
   product_pd(bool, UseOnStackReplacement,                                   \
           "Use on stack replacement, calls runtime if invoc. counter "      \
           "overflows in loop")                                              \
@@ -3871,14 +3879,17 @@
   product(bool, PrintGCCause, true,                                         \
           "Include GC cause in GC logging")                                 \
                                                                             \
+  experimental(intx, SurvivorAlignmentInBytes, 0,                           \
+           "Default survivor space alignment in bytes")                     \
+                                                                            \
   product(bool , AllowNonVirtualCalls, false,                               \
           "Obey the ACC_SUPER flag and allow invokenonvirtual calls")       \
                                                                             \
   diagnostic(ccstr, SharedArchiveFile, NULL,                                \
           "Override the default location of the CDS archive file")          \
                                                                             \
-  experimental(uintx, ArrayAllocatorMallocLimit,                            \
-          SOLARIS_ONLY(64*K) NOT_SOLARIS(max_uintx),                        \
+  experimental(size_t, ArrayAllocatorMallocLimit,                           \
+          SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1),                       \
           "Allocation less than this value will be allocated "              \
           "using malloc. Larger allocations will use mmap.")                \
                                                                             \
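
SizeTFlagSetting, added to globals.hpp above, follows the same scope-guard pattern as the existing UIntFlagSetting and DoubleFlagSetting: the constructor saves the old value and installs the new one, and the destructor restores the old value when the scope ends. A self-contained usage sketch; some_size_t_flag is a stand-in, not a real VM flag:

    #include <cstddef>
    #include <cstdio>

    static size_t some_size_t_flag = 16;       // stand-in for a size_t VM flag

    class SizeTFlagSetting {
      size_t val;
      size_t* flag;
     public:
      SizeTFlagSetting(size_t& fl, size_t newValue) { flag = &fl; val = fl; fl = newValue; }
      ~SizeTFlagSetting()                           { *flag = val; }
    };

    int main() {
      {
        SizeTFlagSetting guard(some_size_t_flag, 1024);
        printf("inside: %zu\n", some_size_t_flag); // inside: 1024
      }                                            // destructor restores the old value here
      printf("after:  %zu\n", some_size_t_flag);   // after:  16
      return 0;
    }
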
--- a/hotspot/src/share/vm/runtime/globals_extension.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/globals_extension.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -200,6 +200,7 @@
   static void intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin);
   static void uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin);
   static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin);
+  static void size_tAtPut(CommandLineFlagWithType flag, size_t value, Flag::Flags origin);
   static void doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin);
   // Contract:  Flag will make private copy of the incoming value
   static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin);
--- a/hotspot/src/share/vm/runtime/handles.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/handles.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -227,7 +227,7 @@
   HandleArea* _prev;          // link to outer (older) area
  public:
   // Constructor
-  HandleArea(HandleArea* prev) : Arena(Chunk::tiny_size) {
+  HandleArea(HandleArea* prev) : Arena(mtThread, Chunk::tiny_size) {
     debug_only(_handle_mark_nesting    = 0);
     debug_only(_no_handle_mark_nesting = 0);
     _prev = prev;
--- a/hotspot/src/share/vm/runtime/init.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/init.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,8 +34,10 @@
 #include "runtime/init.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/macros.hpp"
 
+
 // Initialization done by VM thread in vm_init_globals()
 void check_ThreadShadow();
 void eventlog_init();
@@ -131,6 +133,12 @@
   javaClasses_init();   // must happen after vtable initialization
   stubRoutines_init2(); // note: StubRoutines need 2-phase init
 
+#if INCLUDE_NMT
+  // The Solaris stack is walkable only after stubRoutines are set up.
+  // On other platforms, the stack is always walkable.
+  NMT_stack_walkable = true;
+#endif // INCLUDE_NMT
+
   // All the flags that get adjusted by VM_Version_init and os::init_2
   // have been set so dump the flags now.
   if (PrintFlagsFinal) {
--- a/hotspot/src/share/vm/runtime/java.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/java.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -57,7 +57,6 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
 #include "runtime/vm_operations.hpp"
-#include "services/memReporter.hpp"
 #include "services/memTracker.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/dtrace.hpp"
@@ -349,12 +348,7 @@
 #endif // ENABLE_ZAP_DEAD_LOCALS
   // Native memory tracking data
   if (PrintNMTStatistics) {
-    if (MemTracker::is_on()) {
-      BaselineTTYOutputer outputer(tty);
-      MemTracker::print_memory_usage(outputer, K, false);
-    } else {
-      tty->print_cr("%s", MemTracker::reason());
-    }
+    MemTracker::final_report(tty);
   }
 }
 
@@ -390,12 +384,7 @@
 
   // Native memory tracking data
   if (PrintNMTStatistics) {
-    if (MemTracker::is_on()) {
-      BaselineTTYOutputer outputer(tty);
-      MemTracker::print_memory_usage(outputer, K, false);
-    } else {
-      tty->print_cr("%s", MemTracker::reason());
-    }
+    MemTracker::final_report(tty);
   }
 }
 
@@ -544,10 +533,6 @@
     BeforeExit_lock->notify_all();
   }
 
-  // Shutdown NMT before exit. Otherwise,
-  // it will run into trouble when system destroys static variables.
-  MemTracker::shutdown(MemTracker::NMT_normal);
-
   if (VerifyStringTableAtExit) {
     int fail_cnt = 0;
     {
--- a/hotspot/src/share/vm/runtime/os.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/os.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -52,6 +52,7 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/vm_version.hpp"
 #include "services/attachListener.hpp"
+#include "services/nmtCommon.hpp"
 #include "services/memTracker.hpp"
 #include "services/threadService.hpp"
 #include "utilities/defaultStream.hpp"
@@ -516,6 +517,14 @@
   return dup_str;
 }
 
+char* os::strdup_check_oom(const char* str, MEMFLAGS flags) {
+  char* p = os::strdup(str, flags);
+  if (p == NULL) {
+    vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom");
+  }
+  return p;
+}
+
 
 #define paranoid                 0  /* only set to 1 if you suspect checking code has bug */
 
@@ -553,7 +562,11 @@
   return ptr;
 }
 
-void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
+void* os::malloc(size_t size, MEMFLAGS flags) {
+  return os::malloc(size, flags, CALLER_PC);
+}
+
+void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
   NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
 
@@ -579,11 +592,15 @@
     size = 1;
   }
 
+  // NMT support
+  NMT_TrackingLevel level = MemTracker::tracking_level();
+  size_t            nmt_header_size = MemTracker::malloc_header_size(level);
+
 #ifndef ASSERT
-  const size_t alloc_size = size;
+  const size_t alloc_size = size + nmt_header_size;
 #else
-  const size_t alloc_size = GuardedMemory::get_total_size(size);
-  if (size > alloc_size) { // Check for rollover.
+  const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size);
+  if (size + nmt_header_size > alloc_size) { // Check for rollover.
     return NULL;
   }
 #endif
@@ -602,7 +619,7 @@
     return NULL;
   }
   // Wrap memory with guard
-  GuardedMemory guarded(ptr, size);
+  GuardedMemory guarded(ptr, size + nmt_header_size);
   ptr = guarded.get_user_ptr();
 #endif
   if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
@@ -615,48 +632,50 @@
   }
 
   // we do not track guard memory
-  MemTracker::record_malloc((address)ptr, size, memflags, caller == 0 ? CALLER_PC : caller);
-
-  return ptr;
+  return MemTracker::record_malloc((address)ptr, size, memflags, stack, level);
 }
 
+void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
+  return os::realloc(memblock, size, flags, CALLER_PC);
+}
 
-void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller) {
+void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
 #ifndef ASSERT
   NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
-  MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
-  void* ptr = ::realloc(memblock, size);
-  if (ptr != NULL) {
-    tkr.record((address)memblock, (address)ptr, size, memflags,
-     caller == 0 ? CALLER_PC : caller);
-  } else {
-    tkr.discard();
-  }
-  return ptr;
+  // NMT support
+  void* membase = MemTracker::record_free(memblock);
+  NMT_TrackingLevel level = MemTracker::tracking_level();
+  size_t  nmt_header_size = MemTracker::malloc_header_size(level);
+  void* ptr = ::realloc(membase, size + nmt_header_size);
+  return MemTracker::record_malloc(ptr, size, memflags, stack, level);
 #else
   if (memblock == NULL) {
-    return os::malloc(size, memflags, (caller == 0 ? CALLER_PC : caller));
+    return os::malloc(size, memflags, stack);
   }
   if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
     tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
     breakpoint();
   }
-  verify_memory(memblock);
+  // NMT support
+  void* membase = MemTracker::malloc_base(memblock);
+  verify_memory(membase);
   NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
   if (size == 0) {
     return NULL;
   }
   // always move the block
-  void* ptr = os::malloc(size, memflags, caller == 0 ? CALLER_PC : caller);
+  void* ptr = os::malloc(size, memflags, stack);
   if (PrintMalloc) {
     tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
   }
   // Copy to new memory if malloc didn't fail
   if ( ptr != NULL ) {
-    GuardedMemory guarded(memblock);
-    memcpy(ptr, memblock, MIN2(size, guarded.get_user_size()));
-    if (paranoid) verify_memory(ptr);
+    GuardedMemory guarded(MemTracker::malloc_base(memblock));
+    // Guard's user data contains NMT header
+    size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock);
+    memcpy(ptr, memblock, MIN2(size, memblock_size));
+    if (paranoid) verify_memory(MemTracker::malloc_base(ptr));
     if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
       tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
       breakpoint();
@@ -669,7 +688,6 @@
 
 
 void  os::free(void *memblock, MEMFLAGS memflags) {
-  address trackp = (address) memblock;
   NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
 #ifdef ASSERT
   if (memblock == NULL) return;
@@ -677,20 +695,22 @@
     if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock);
     breakpoint();
   }
-  verify_memory(memblock);
+  void* membase = MemTracker::record_free(memblock);
+  verify_memory(membase);
   NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
 
-  GuardedMemory guarded(memblock);
+  GuardedMemory guarded(membase);
   size_t size = guarded.get_user_size();
   inc_stat_counter(&free_bytes, size);
-  memblock = guarded.release_for_freeing();
+  membase = guarded.release_for_freeing();
   if (PrintMalloc && tty != NULL) {
-      fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)memblock);
+      fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)membase);
   }
+  ::free(membase);
+#else
+  void* membase = MemTracker::record_free(memblock);
+  ::free(membase);
 #endif
-  MemTracker::record_free(trackp, memflags);
-
-  ::free(memblock);
 }
 
 void os::init_random(long initval) {
@@ -1478,7 +1498,7 @@
 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
   char* result = pd_reserve_memory(bytes, addr, alignment_hint);
   if (result != NULL) {
-    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
+    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   }
 
   return result;
@@ -1488,7 +1508,7 @@
    MEMFLAGS flags) {
   char* result = pd_reserve_memory(bytes, addr, alignment_hint);
   if (result != NULL) {
-    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
+    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
     MemTracker::record_virtual_memory_type((address)result, flags);
   }
 
@@ -1498,7 +1518,7 @@
 char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
   char* result = pd_attempt_reserve_memory_at(bytes, addr);
   if (result != NULL) {
-    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
+    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   }
   return result;
 }
@@ -1538,23 +1558,29 @@
 }
 
 bool os::uncommit_memory(char* addr, size_t bytes) {
-  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
-  bool res = pd_uncommit_memory(addr, bytes);
-  if (res) {
-    tkr.record((address)addr, bytes);
+  bool res;
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
+    res = pd_uncommit_memory(addr, bytes);
+    if (res) {
+      tkr.record((address)addr, bytes);
+    }
   } else {
-    tkr.discard();
+    res = pd_uncommit_memory(addr, bytes);
   }
   return res;
 }
 
 bool os::release_memory(char* addr, size_t bytes) {
-  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  bool res = pd_release_memory(addr, bytes);
-  if (res) {
-    tkr.record((address)addr, bytes);
+  bool res;
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+    res = pd_release_memory(addr, bytes);
+    if (res) {
+      tkr.record((address)addr, bytes);
+    }
   } else {
-    tkr.discard();
+    res = pd_release_memory(addr, bytes);
   }
   return res;
 }
@@ -1565,7 +1591,7 @@
                            bool allow_exec) {
   char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
   if (result != NULL) {
-    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC);
+    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
   }
   return result;
 }
@@ -1578,12 +1604,15 @@
 }
 
 bool os::unmap_memory(char *addr, size_t bytes) {
-  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  bool result = pd_unmap_memory(addr, bytes);
-  if (result) {
-    tkr.record((address)addr, bytes);
+  bool result;
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+    result = pd_unmap_memory(addr, bytes);
+    if (result) {
+      tkr.record((address)addr, bytes);
+    }
   } else {
-    tkr.discard();
+    result = pd_unmap_memory(addr, bytes);
   }
   return result;
 }
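
The os::malloc/os::free rework above prepends an NMT header to every malloc'd block: the allocator requests size + nmt_header_size bytes, record_malloc fills the header and returns the address just past it, and record_free recovers the malloc base before ::free. A simplified standalone sketch of that layout; the header struct is an illustrative assumption, as the real header is defined by the malloc tracker:

    #include <cstdlib>

    struct NMTHeaderSketch { size_t size; int flags; }; // illustrative header layout only

    static void* malloc_with_header(size_t user_size) {
      void* base = ::malloc(sizeof(NMTHeaderSketch) + user_size);
      if (base == NULL) return NULL;
      ((NMTHeaderSketch*)base)->size = user_size;       // record_malloc fills the header...
      return (char*)base + sizeof(NMTHeaderSketch);     // ...and hands out the user pointer
    }

    static void free_with_header(void* user_ptr) {
      if (user_ptr == NULL) return;
      void* base = (char*)user_ptr - sizeof(NMTHeaderSketch); // record_free recovers the base
      ::free(base);
    }

    int main() {
      void* p = malloc_with_header(100);
      free_with_header(p);
      return 0;
    }
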
--- a/hotspot/src/share/vm/runtime/os.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/os.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -65,6 +65,8 @@
 class Event;
 class DLL;
 class FileHandle;
+class NativeCallStack;
+
 template<class E> class GrowableArray;
 
 // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
@@ -96,9 +98,11 @@
 // Typedef for structured exception handling support
 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
 
+class MallocTracker;
+
 class os: AllStatic {
   friend class VMStructs;
-
+  friend class MallocTracker;
  public:
   enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
 
@@ -160,7 +164,10 @@
   // Override me as needed
   static int    file_name_strcmp(const char* s1, const char* s2);
 
+  // get/unset environment variable
   static bool getenv(const char* name, char* buffer, int len);
+  static bool unsetenv(const char* name);
+
   static bool have_special_privileges();
 
   static jlong  javaTimeMillis();
@@ -207,8 +214,13 @@
 
   // Interface for detecting multiprocessor system
   static inline bool is_MP() {
+#if !INCLUDE_NMT
     assert(_processor_count > 0, "invalid processor count");
     return _processor_count > 1 || AssumeMP;
+#else
+    // NMT needs atomic operations before this initialization.
+    return true;
+#endif
   }
   static julong available_memory();
   static julong physical_memory();
@@ -635,15 +647,25 @@
   static void* thread_local_storage_at(int index);
   static void  free_thread_local_storage(int index);
 
-  // Stack walk
-  static address get_caller_pc(int n = 0);
+  // Retrieve native stack frames.
+  // Parameters:
+  //   stack:  an array in which to store the stack frame addresses.
+  //   size:   the size of the above array.
+  //   toSkip: number of stack frames to skip at the beginning.
+  // Return: number of stack frames captured.
+  static int get_native_stack(address* stack, int size, int toSkip = 0);
 
   // General allocation (must be MT-safe)
-  static void* malloc  (size_t size, MEMFLAGS flags, address caller_pc = 0);
-  static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0);
+  static void* malloc  (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
+  static void* malloc  (size_t size, MEMFLAGS flags);
+  static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
+  static void* realloc (void *memblock, size_t size, MEMFLAGS flag);
+
   static void  free    (void *memblock, MEMFLAGS flags = mtNone);
   static bool  check_heap(bool force = false);      // verify C heap integrity
   static char* strdup(const char *, MEMFLAGS flags = mtInternal);  // Like strdup
+  // Like strdup, but exit VM when strdup() returns NULL
+  static char* strdup_check_oom(const char*, MEMFLAGS flags = mtInternal);
 
 #ifndef PRODUCT
   static julong num_mallocs;         // # of calls to malloc/realloc
--- a/hotspot/src/share/vm/runtime/safepoint.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -52,7 +52,6 @@
 #include "runtime/sweeper.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.inline.hpp"
-#include "services/memTracker.hpp"
 #include "services/runtimeService.hpp"
 #include "utilities/events.hpp"
 #include "utilities/macros.hpp"
@@ -527,10 +526,6 @@
     TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
     ClassLoaderDataGraph::purge_if_needed();
   }
-
-  if (MemTracker::is_on()) {
-    MemTracker::sync();
-  }
 }
 
 
--- a/hotspot/src/share/vm/runtime/sharedRuntimeMath.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntimeMath.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -27,20 +27,51 @@
 
 #include <math.h>
 
-// VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles
-// [jk] this is not 100% correct because the float word order may different
-// from the byte order (e.g. on ARM FPA)
+// Used to access the lower/higher 32 bits of a double
+typedef union {
+    double d;
+    struct {
 #ifdef VM_LITTLE_ENDIAN
-# define __HI(x) *(1+(int*)&x)
-# define __LO(x) *(int*)&x
+      int lo;
+      int hi;
 #else
-# define __HI(x) *(int*)&x
-# define __LO(x) *(1+(int*)&x)
+      int hi;
+      int lo;
 #endif
+    } split;
+} DoubleIntConv;
+
+static inline int high(double d) {
+  DoubleIntConv x;
+  x.d = d;
+  return x.split.hi;
+}
+
+static inline int low(double d) {
+  DoubleIntConv x;
+  x.d = d;
+  return x.split.lo;
+}
+
+static inline void set_high(double* d, int high) {
+  DoubleIntConv conv;
+  conv.d = *d;
+  conv.split.hi = high;
+  *d = conv.d;
+}
+
+static inline void set_low(double* d, int low) {
+  DoubleIntConv conv;
+  conv.d = *d;
+  conv.split.lo = low;
+  *d = conv.d;
+}
 
 static double copysignA(double x, double y) {
-  __HI(x) = (__HI(x)&0x7fffffff)|(__HI(y)&0x80000000);
-  return x;
+  DoubleIntConv convX;
+  convX.d = x;
+  convX.split.hi = (convX.split.hi & 0x7fffffff) | (high(y) & 0x80000000);
+  return convX.d;
 }
 
 /*
@@ -67,30 +98,32 @@
 hugeX  = 1.0e+300,
 tiny   = 1.0e-300;
 
-static double scalbnA (double x, int n) {
+static double scalbnA(double x, int n) {
   int  k,hx,lx;
-  hx = __HI(x);
-  lx = __LO(x);
+  hx = high(x);
+  lx = low(x);
   k = (hx&0x7ff00000)>>20;              /* extract exponent */
   if (k==0) {                           /* 0 or subnormal x */
     if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */
     x *= two54;
-    hx = __HI(x);
+    hx = high(x);
     k = ((hx&0x7ff00000)>>20) - 54;
     if (n< -50000) return tiny*x;       /*underflow*/
   }
   if (k==0x7ff) return x+x;             /* NaN or Inf */
   k = k+n;
-  if (k >  0x7fe) return hugeX*copysignA(hugeX,x); /* overflow  */
-  if (k > 0)                            /* normal result */
-    {__HI(x) = (hx&0x800fffff)|(k<<20); return x;}
+  if (k > 0x7fe) return hugeX*copysignA(hugeX,x); /* overflow  */
+  if (k > 0) {                          /* normal result */
+    set_high(&x, (hx&0x800fffff)|(k<<20));
+    return x;
+  }
   if (k <= -54) {
     if (n > 50000)      /* in case integer overflow in n+k */
       return hugeX*copysignA(hugeX,x);  /*overflow*/
     else return tiny*copysignA(tiny,x); /*underflow*/
   }
   k += 54;                              /* subnormal result */
-  __HI(x) = (hx&0x800fffff)|(k<<20);
+  set_high(&x, (hx&0x800fffff)|(k<<20));
   return x*twom54;
 }
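
The DoubleIntConv union above replaces the old pointer-casting __HI/__LO macros with word-level access through a union, the type-punning idiom that the compilers HotSpot targets treat as well-defined. A standalone illustration of the same sign transplant that copysignA performs; it assumes a little-endian host, matching the VM_LITTLE_ENDIAN branch:

    #include <cstdio>

    typedef union {
      double d;
      struct { int lo; int hi; } split;  // lo/hi order flips on big-endian targets
    } DoubleIntConvSketch;

    int main() {
      DoubleIntConvSketch x, y;
      x.d = 2.0;
      y.d = -0.0;
      // copysignA: keep x's exponent/mantissa bits, take y's sign bit
      x.split.hi = (x.split.hi & 0x7fffffff) | (y.split.hi & 0x80000000);
      printf("%f\n", x.d);               // prints -2.000000
      return 0;
    }
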
 
--- a/hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -40,6 +40,7 @@
 // generated; can not figure out how to turn down optimization for one
 // file in the IDE on Windows
 #ifdef WIN32
+# pragma warning( disable: 4748 ) // /GS can not protect parameters and local variables from local buffer overrun because optimizations are disabled in function
 # pragma optimize ( "", off )
 #endif
 
@@ -114,8 +115,8 @@
   int k,hx,i,j;
   unsigned lx;
 
-  hx = __HI(x);               /* high word of x */
-  lx = __LO(x);               /* low  word of x */
+  hx = high(x);               /* high word of x */
+  lx = low(x);                /* low  word of x */
 
   k=0;
   if (hx < 0x00100000) {                   /* x < 2**-1022  */
@@ -123,13 +124,13 @@
       return -two54/zero;             /* log(+-0)=-inf */
     if (hx<0) return (x-x)/zero;   /* log(-#) = NaN */
     k -= 54; x *= two54; /* subnormal number, scale up x */
-    hx = __HI(x);             /* high word of x */
+    hx = high(x);             /* high word of x */
   }
   if (hx >= 0x7ff00000) return x+x;
   k += (hx>>20)-1023;
   hx &= 0x000fffff;
   i = (hx+0x95f64)&0x100000;
-  __HI(x) = hx|(i^0x3ff00000);        /* normalize x or x/2 */
+  set_high(&x, hx|(i^0x3ff00000)); /* normalize x or x/2 */
   k += (i>>20);
   f = x-1.0;
   if((0x000fffff&(2+hx))<3) {  /* |f| < 2**-20 */
@@ -208,8 +209,8 @@
   int i,k,hx;
   unsigned lx;
 
-  hx = __HI(x);       /* high word of x */
-  lx = __LO(x);       /* low word of x */
+  hx = high(x);       /* high word of x */
+  lx = low(x);        /* low word of x */
 
   k=0;
   if (hx < 0x00100000) {                  /* x < 2**-1022  */
@@ -217,14 +218,14 @@
       return -two54/zero;             /* log(+-0)=-inf */
     if (hx<0) return (x-x)/zero;        /* log(-#) = NaN */
     k -= 54; x *= two54; /* subnormal number, scale up x */
-    hx = __HI(x);                /* high word of x */
+    hx = high(x);                /* high word of x */
   }
   if (hx >= 0x7ff00000) return x+x;
   k += (hx>>20)-1023;
   i  = ((unsigned)k&0x80000000)>>31;
   hx = (hx&0x000fffff)|((0x3ff-i)<<20);
   y  = (double)(k+i);
-  __HI(x) = hx;
+  set_high(&x, hx);
   z  = y*log10_2lo + ivln10*__ieee754_log(x);
   return  z+y*log10_2hi;
 }
@@ -319,14 +320,14 @@
   int k=0,xsb;
   unsigned hx;
 
-  hx  = __HI(x);        /* high word of x */
+  hx  = high(x);                /* high word of x */
   xsb = (hx>>31)&1;             /* sign bit of x */
   hx &= 0x7fffffff;             /* high word of |x| */
 
   /* filter out non-finite argument */
   if(hx >= 0x40862E42) {                        /* if |x|>=709.78... */
     if(hx>=0x7ff00000) {
-      if(((hx&0xfffff)|__LO(x))!=0)
+      if(((hx&0xfffff)|low(x))!=0)
         return x+x;             /* NaN */
       else return (xsb==0)? x:0.0;      /* exp(+-inf)={inf,0} */
     }
@@ -357,10 +358,10 @@
   if(k==0)      return one-((x*c)/(c-2.0)-x);
   else          y = one-((lo-(x*c)/(2.0-c))-hi);
   if(k >= -1021) {
-    __HI(y) += (k<<20); /* add k to y's exponent */
+    set_high(&y, high(y) + (k<<20)); /* add k to y's exponent */
     return y;
   } else {
-    __HI(y) += ((k+1000)<<20);/* add k to y's exponent */
+    set_high(&y, high(y) + ((k+1000)<<20)); /* add k to y's exponent */
     return y*twom1000;
   }
 }
@@ -447,8 +448,8 @@
   unsigned lx,ly;
 
   i0 = ((*(int*)&one)>>29)^1; i1=1-i0;
-  hx = __HI(x); lx = __LO(x);
-  hy = __HI(y); ly = __LO(y);
+  hx = high(x); lx = low(x);
+  hy = high(y); ly = low(y);
   ix = hx&0x7fffffff;  iy = hy&0x7fffffff;
 
   /* y==zero: x**0 = 1 */
@@ -548,14 +549,14 @@
     u = ivln2_h*t;      /* ivln2_h has 21 sig. bits */
     v = t*ivln2_l-w*ivln2;
     t1 = u+v;
-    __LO(t1) = 0;
+    set_low(&t1, 0);
     t2 = v-(t1-u);
   } else {
     double ss,s2,s_h,s_l,t_h,t_l;
     n = 0;
     /* take care subnormal number */
     if(ix<0x00100000)
-      {ax *= two53; n -= 53; ix = __HI(ax); }
+      {ax *= two53; n -= 53; ix = high(ax); }
     n  += ((ix)>>20)-0x3ff;
     j  = ix&0x000fffff;
     /* determine interval */
@@ -563,17 +564,17 @@
     if(j<=0x3988E) k=0;         /* |x|<sqrt(3/2) */
     else if(j<0xBB67A) k=1;     /* |x|<sqrt(3)   */
     else {k=0;n+=1;ix -= 0x00100000;}
-    __HI(ax) = ix;
+    set_high(&ax, ix);
 
     /* compute ss = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
     u = ax-bp[k];               /* bp[0]=1.0, bp[1]=1.5 */
     v = one/(ax+bp[k]);
     ss = u*v;
     s_h = ss;
-    __LO(s_h) = 0;
+    set_low(&s_h, 0);
     /* t_h=ax+bp[k] High */
     t_h = zeroX;
-    __HI(t_h)=((ix>>1)|0x20000000)+0x00080000+(k<<18);
+    set_high(&t_h, ((ix>>1)|0x20000000)+0x00080000+(k<<18));
     t_l = ax - (t_h-bp[k]);
     s_l = v*((u-s_h*t_h)-s_h*t_l);
     /* compute log(ax) */
@@ -582,32 +583,32 @@
     r += s_l*(s_h+ss);
     s2  = s_h*s_h;
     t_h = 3.0+s2+r;
-    __LO(t_h) = 0;
+    set_low(&t_h, 0);
     t_l = r-((t_h-3.0)-s2);
     /* u+v = ss*(1+...) */
     u = s_h*t_h;
     v = s_l*t_h+t_l*ss;
     /* 2/(3log2)*(ss+...) */
     p_h = u+v;
-    __LO(p_h) = 0;
+    set_low(&p_h, 0);
     p_l = v-(p_h-u);
     z_h = cp_h*p_h;             /* cp_h+cp_l = 2/(3*log2) */
     z_l = cp_l*p_h+p_l*cp+dp_l[k];
     /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */
     t = (double)n;
     t1 = (((z_h+z_l)+dp_h[k])+t);
-    __LO(t1) = 0;
+    set_low(&t1, 0);
     t2 = z_l-(((t1-t)-dp_h[k])-z_h);
   }
 
   /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
   y1  = y;
-  __LO(y1) = 0;
+  set_low(&y1, 0);
   p_l = (y-y1)*t1+y*t2;
   p_h = y1*t1;
   z = p_l+p_h;
-  j = __HI(z);
-  i = __LO(z);
+  j = high(z);
+  i = low(z);
   if (j>=0x40900000) {                          /* z >= 1024 */
     if(((j-0x40900000)|i)!=0)                   /* if z > 1024 */
       return s*hugeX*hugeX;                     /* overflow */
@@ -631,13 +632,13 @@
     n = j+(0x00100000>>(k+1));
     k = ((n&0x7fffffff)>>20)-0x3ff;     /* new k for n */
     t = zeroX;
-    __HI(t) = (n&~(0x000fffff>>k));
+    set_high(&t, (n&~(0x000fffff>>k)));
     n = ((n&0x000fffff)|0x00100000)>>(20-k);
     if(j<0) n = -n;
     p_h -= t;
   }
   t = p_l+p_h;
-  __LO(t) = 0;
+  set_low(&t, 0);
   u = t*lg2_h;
   v = (p_l-(t-p_h))*lg2+t*lg2_l;
   z = u+v;
@@ -646,10 +647,10 @@
   t1  = z - t*(P1+t*(P2+t*(P3+t*(P4+t*P5))));
   r  = (z*t1)/(t1-two)-(w+z*w);
   z  = one-(r-z);
-  j  = __HI(z);
+  j  = high(z);
   j += (n<<20);
   if((j>>20)<=0) z = scalbnA(z,n);       /* subnormal output */
-  else __HI(z) += (n<<20);
+  else set_high(&z, high(z) + (n<<20));
   return s*z;
 }
 
--- a/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -519,7 +519,7 @@
 {
         double z,r,v;
         int ix;
-        ix = __HI(x)&0x7fffffff;        /* high word of x */
+        ix = high(x)&0x7fffffff;                /* high word of x */
         if(ix<0x3e400000)                       /* |x| < 2**-27 */
            {if((int)x==0) return x;}            /* generate inexact */
         z       =  x*x;
@@ -574,9 +574,9 @@
 
 static double __kernel_cos(double x, double y)
 {
-  double a,h,z,r,qx;
+  double a,h,z,r,qx=0;
   int ix;
-  ix = __HI(x)&0x7fffffff;      /* ix = |x|'s high word*/
+  ix = high(x)&0x7fffffff;              /* ix = |x|'s high word*/
   if(ix<0x3e400000) {                   /* if x < 2**27 */
     if(((int)x)==0) return one;         /* generate inexact */
   }
@@ -588,8 +588,8 @@
     if(ix > 0x3fe90000) {               /* x > 0.78125 */
       qx = 0.28125;
     } else {
-      __HI(qx) = ix-0x00200000; /* x/4 */
-      __LO(qx) = 0;
+      set_high(&qx, ix-0x00200000); /* x/4 */
+      set_low(&qx, 0);
     }
     h = 0.5*z-qx;
     a = one-qx;
@@ -654,11 +654,11 @@
 {
   double z,r,v,w,s;
   int ix,hx;
-  hx = __HI(x);   /* high word of x */
+  hx = high(x);           /* high word of x */
   ix = hx&0x7fffffff;     /* high word of |x| */
   if(ix<0x3e300000) {                     /* x < 2**-28 */
     if((int)x==0) {                       /* generate inexact */
-      if (((ix | __LO(x)) | (iy + 1)) == 0)
+      if (((ix | low(x)) | (iy + 1)) == 0)
         return one / fabsd(x);
       else {
         if (iy == 1)
@@ -667,10 +667,10 @@
           double a, t;
 
           z = w = x + y;
-          __LO(z) = 0;
+          set_low(&z, 0);
           v = y - (z - x);
           t = a = -one / w;
-          __LO(t) = 0;
+          set_low(&t, 0);
           s = one + t * z;
           return t + a * (s + t * v);
         }
@@ -705,10 +705,10 @@
     /*  compute -1.0/(x+r) accurately */
     double a,t;
     z  = w;
-    __LO(z) = 0;
+    set_low(&z, 0);
     v  = r-(z - x);     /* z+v = r+x */
     t = a  = -1.0/w;    /* a = -1.0/w */
-    __LO(t) = 0;
+    set_low(&t, 0);
     s  = 1.0+t*z;
     return t+a*(s+t*v);
   }
@@ -757,7 +757,7 @@
   int n, ix;
 
   /* High word of x. */
-  ix = __HI(x);
+  ix = high(x);
 
   /* |x| ~< pi/4 */
   ix &= 0x7fffffff;
@@ -815,7 +815,7 @@
   int n, ix;
 
   /* High word of x. */
-  ix = __HI(x);
+  ix = high(x);
 
   /* |x| ~< pi/4 */
   ix &= 0x7fffffff;
@@ -872,7 +872,7 @@
   int n, ix;
 
   /* High word of x. */
-  ix = __HI(x);
+  ix = high(x);
 
   /* |x| ~< pi/4 */
   ix &= 0x7fffffff;
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -297,8 +297,7 @@
 #if INCLUDE_NMT
   // record thread's native stack, stack grows downward
   address stack_low_addr = stack_base() - stack_size();
-  MemTracker::record_thread_stack(stack_low_addr, stack_size(), this,
-      CURRENT_PC);
+  MemTracker::record_thread_stack(stack_low_addr, stack_size());
 #endif // INCLUDE_NMT
 }
 
@@ -316,7 +315,7 @@
 #if INCLUDE_NMT
   if (_stack_base != NULL) {
     address low_stack_addr = stack_base() - stack_size();
-    MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);
+    MemTracker::release_thread_stack(low_stack_addr, stack_size());
 #ifdef ASSERT
     set_stack_base(NULL);
 #endif
@@ -1425,9 +1424,6 @@
   set_monitor_chunks(NULL);
   set_next(NULL);
   set_thread_state(_thread_new);
-#if INCLUDE_NMT
-  set_recorder(NULL);
-#endif
   _terminated = _not_terminated;
   _privileged_stack_top = NULL;
   _array_for_gc = NULL;
@@ -1503,7 +1499,6 @@
     _jni_attach_state = _not_attaching_via_jni;
   }
   assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
-  _safepoint_visible = false;
 }
 
 bool JavaThread::reguard_stack(address cur_sp) {
@@ -1566,7 +1561,6 @@
   thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread :
                                                      os::java_thread;
   os::create_thread(this, thr_type, stack_sz);
-  _safepoint_visible = false;
   // The _osthread may be NULL here because we ran out of memory (too many threads active).
   // We need to throw and OutOfMemoryError - however we cannot do this here because the caller
   // may hold a lock and all locks must be unlocked before throwing the exception (throwing
@@ -1584,13 +1578,6 @@
       tty->print_cr("terminate thread %p", this);
   }
 
-  // By now, this thread should already be invisible to safepoint,
-  // and its per-thread recorder also collected.
-  assert(!is_safepoint_visible(), "wrong state");
-#if INCLUDE_NMT
-  assert(get_recorder() == NULL, "Already collected");
-#endif // INCLUDE_NMT
-
   // JSR166 -- return the parker to the free list
   Parker::Release(_parker);
   _parker = NULL;
@@ -3359,11 +3346,6 @@
   // initialize TLS
   ThreadLocalStorage::init();
 
-  // Bootstrap native memory tracking, so it can start recording memory
-  // activities before worker thread is started. This is the first phase
-  // of bootstrapping, VM is currently running in single-thread mode.
-  MemTracker::bootstrap_single_thread();
-
   // Initialize output stream logging
   ostream_init_log();
 
@@ -3414,9 +3396,6 @@
   // Initialize Java-Level synchronization subsystem
   ObjectMonitor::Initialize();
 
-  // Second phase of bootstrapping, VM is about entering multi-thread mode
-  MemTracker::bootstrap_multi_thread();
-
   // Initialize global modules
   jint status = init_globals();
   if (status != JNI_OK) {
@@ -3438,9 +3417,6 @@
   // real raw monitor. VM is setup enough here for raw monitor enter.
   JvmtiExport::transition_pending_onload_raw_monitors();
 
-  // Fully start NMT
-  MemTracker::start();
-
   // Create the VMThread
   { TraceTime timer("Start VMThread", TraceStartupTime);
     VMThread::create();
@@ -3995,8 +3971,6 @@
     daemon = false;
   }
 
-  p->set_safepoint_visible(true);
-
   ThreadService::add_thread(p, daemon);
 
   // Possible GC point.
@@ -4042,13 +4016,6 @@
     // to do callbacks into the safepoint code. However, the safepoint code is not aware
     // of this thread since it is removed from the queue.
     p->set_terminated_value();
-
-    // Now, this thread is not visible to safepoint
-    p->set_safepoint_visible(false);
-    // once the thread becomes safepoint invisible, we can not use its per-thread
-    // recorder. And Threads::do_threads() no longer walks this thread, so we have
-    // to release its per-thread recorder here.
-    MemTracker::thread_exiting(p);
   } // unlock Threads_lock
 
   // Since Events::log uses a lock, we grab it outside the Threads_lock
--- a/hotspot/src/share/vm/runtime/thread.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -43,10 +43,6 @@
 #include "runtime/unhandledOops.hpp"
 #include "utilities/macros.hpp"
 
-#if INCLUDE_NMT
-#include "services/memRecorder.hpp"
-#endif // INCLUDE_NMT
-
 #include "trace/traceBackend.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/exceptions.hpp"
@@ -1036,16 +1032,6 @@
   bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
 
-#if INCLUDE_NMT
-  // native memory tracking
-  inline MemRecorder* get_recorder() const          { return (MemRecorder*)_recorder; }
-  inline void         set_recorder(MemRecorder* rc) { _recorder = rc; }
-
- private:
-  // per-thread memory recorder
-  MemRecorder* volatile _recorder;
-#endif // INCLUDE_NMT
-
   // Suspend/resume support for JavaThread
  private:
   inline void set_ext_suspended();
@@ -1485,19 +1471,6 @@
      return result;
    }
 
- // NMT (Native memory tracking) support.
- // This flag helps NMT to determine if this JavaThread will be blocked
- // at safepoint. If not, ThreadCritical is needed for writing memory records.
- // JavaThread is only safepoint visible when it is in Threads' thread list,
- // it is not visible until it is added to the list and becomes invisible
- // once it is removed from the list.
- public:
-  bool is_safepoint_visible() const { return _safepoint_visible; }
-  void set_safepoint_visible(bool visible) { _safepoint_visible = visible; }
- private:
-  bool _safepoint_visible;
-
-  // Static operations
  public:
   // Returns the running thread as a JavaThread
   static inline JavaThread* current();
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -52,6 +52,7 @@
 #include "interpreter/bytecodes.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
 #include "memory/cardTableRS.hpp"
 #include "memory/defNewGeneration.hpp"
 #include "memory/freeBlockDictionary.hpp"
@@ -93,6 +94,7 @@
 #include "runtime/globals.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/os.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/serviceThread.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -3296,14 +3298,14 @@
     }
   }
   if (strstr(typeName, " const") == typeName + len - 6) {
-    char * s = strdup(typeName);
+    char * s = os::strdup_check_oom(typeName);
     s[len - 6] = '\0';
     // tty->print_cr("checking \"%s\" for \"%s\"", s, typeName);
     if (recursiveFindType(origtypes, s, true) == 1) {
-      free(s);
+      os::free(s);
       return 1;
     }
-    free(s);
+    os::free(s);
   }
   if (!isRecurse) {
     tty->print_cr("type \"%s\" not found", typeName);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/allocationSite.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
+#define SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/nativeCallStack.hpp"
+
+// An allocation site represents a code path that makes a memory
+// allocation.
+template <class E> class AllocationSite VALUE_OBJ_CLASS_SPEC {
+ private:
+  NativeCallStack  _call_stack;
+  E                e;
+ public:
+  AllocationSite(const NativeCallStack& stack) : _call_stack(stack) { }
+  int hash() const { return _call_stack.hash(); }
+  bool equals(const NativeCallStack& stack) const {
+    return _call_stack.equals(stack);
+  }
+
+  bool equals(const AllocationSite<E>& other) const {
+    return other.equals(_call_stack);
+  }
+
+  const NativeCallStack* call_stack() const {
+    return &_call_stack;
+  }
+
+  // Information regarding this allocation
+  E* data()             { return &e; }
+  const E* peek() const { return &e; }
+};
+
+#endif  // SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
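
AllocationSite above is a small key/payload pair: the native call stack is the identity (hash and equality are delegated to it), and E is whatever accounting record hangs off that code path. A standalone mimic of the shape; KeySketch stands in for NativeCallStack and the long payload is an assumption for illustration:

    #include <cstdio>

    struct KeySketch {                    // stands in for NativeCallStack
      int _hash;
      int  hash() const { return _hash; }
      bool equals(const KeySketch& other) const { return _hash == other._hash; }
    };

    template <class E> class AllocationSiteSketch {
      KeySketch _key;                     // identity: the code path
      E         e;                        // payload: accounting data for that path
     public:
      explicit AllocationSiteSketch(const KeySketch& key) : _key(key), e() {}
      int  hash() const { return _key.hash(); }
      bool equals(const AllocationSiteSketch& other) const { return _key.equals(other._key); }
      E*       data()       { return &e; }
      const E* peek() const { return &e; }
    };

    int main() {
      KeySketch k = { 42 };
      AllocationSiteSketch<long> site(k); // long stands in for a malloc-counter record
      *site.data() = 100;                 // e.g. 100 bytes attributed to this site
      printf("%d %ld\n", site.hash(), *site.peek());
      return 0;
    }
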
--- a/hotspot/src/share/vm/services/attachListener.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/services/attachListener.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -320,6 +320,25 @@
   return res? JNI_OK : JNI_ERR;
 }
 
+// set a size_t global flag using value from AttachOperation
+static jint set_size_t_flag(const char* name, AttachOperation* op, outputStream* out) {
+  size_t value = 0;
+  const char* arg1;
+  if ((arg1 = op->arg(1)) != NULL) {
+    int n = sscanf(arg1, SIZE_FORMAT, &value);
+    if (n != 1) {
+      out->print_cr("flag value must be an unsigned integer");
+      return JNI_ERR;
+    }
+  }
+  bool res = CommandLineFlags::size_tAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
+  if (! res) {
+    out->print_cr("setting flag %s failed", name);
+  }
+
+  return res? JNI_OK : JNI_ERR;
+}
+
 // set a string global flag using value from AttachOperation
 static jint set_ccstr_flag(const char* name, AttachOperation* op, outputStream* out) {
   const char* value;
@@ -356,6 +375,8 @@
       return set_uintx_flag(name, op, out);
     } else if (f->is_uint64_t()) {
       return set_uint64_t_flag(name, op, out);
+    } else if (f->is_size_t()) {
+      return set_size_t_flag(name, op, out);
     } else if (f->is_ccstr()) {
       return set_ccstr_flag(name, op, out);
     } else {
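
set_size_t_flag above parses the attach argument with sscanf and SIZE_FORMAT, HotSpot's printf/scanf format macro for size_t. A standalone sketch of just the parse step, using the standard %zu equivalent in place of the macro:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t value = 0;
      const char* arg1 = "1048576";                       // attach operation argument
      if (sscanf(arg1, "%zu", &value) != 1) {             // SIZE_FORMAT expands similarly
        printf("flag value must be an unsigned integer\n");
        return 1;
      }
      printf("parsed %zu\n", value);                      // parsed 1048576
      return 0;
    }
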
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/mallocSiteTable.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+
+#include "memory/allocation.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "services/mallocSiteTable.hpp"
+
+/*
+ * Early os::malloc() calls come from the initialization of static variables, long before any
+ * VM code is entered. By the time the first os::malloc() call arrives, the malloc site hashtable
+ * has to be initialized, along with the allocation site for the hashtable entries themselves.
+ * To ensure that the malloc site hashtable can be initialized without triggering any additional
+ * os::malloc() call, the hashtable bucket array and the hashtable entry allocation site have to
+ * be static.
+ * This is not a problem for the hashtable buckets: they form an array of pointers, so the C
+ * runtime just allocates a block of memory for the array and zeroes it.
+ * But for the hashtable entry allocation site object, things get tricky. The C runtime not only
+ * allocates memory for it, but also calls its constructor at some later time. If we initialized
+ * the allocation site at the first os::malloc() call, the object would be reinitialized when the
+ * C runtime later ran its constructor.
+ * To work around this issue, we declare a static size_t array sized to hold a
+ * MallocSiteHashtableEntry, and use that memory to instantiate the MallocSiteHashtableEntry for
+ * the hashtable entry allocation site. Since it is an array of a primitive type, the C runtime
+ * does nothing beyond reserving the memory block for the variable, which is exactly what we want.
+ * The same trick is applied to create the NativeCallStack object used for MallocSiteHashtableEntry
+ * memory allocation.
+ *
+ * Note: C++ objects are usually aligned to a particular boundary, depending on the compiler
+ * implementation. We declare the memory as size_t arrays to ensure it is aligned to the native
+ * machine word alignment.
+ */
+
+// Reserve enough memory for NativeCallStack and MallocSiteHashtableEntry objects
+size_t MallocSiteTable::_hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
+size_t MallocSiteTable::_hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
+
+// Malloc site hashtable buckets
+MallocSiteHashtableEntry*  MallocSiteTable::_table[MallocSiteTable::table_size];
+
+// concurrent access counter
+volatile int MallocSiteTable::_access_count = 0;
+
+// Tracking hashtable contention
+NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)
+
+
+/*
+ * Initialize malloc site table.
+ * Hashtable entries are malloc'd, so allocating them could itself
+ * recurse into tracking and cause infinite recursion.
+ * To avoid that problem, we pre-initialize a hash entry for
+ * this allocation site.
+ * The method is called during C runtime static variable initialization,
+ * which is single-threaded from the JVM's perspective.
+ */
+bool MallocSiteTable::initialize() {
+  assert(sizeof(_hash_entry_allocation_stack) >= sizeof(NativeCallStack), "Sanity Check");
+  assert(sizeof(_hash_entry_allocation_site) >= sizeof(MallocSiteHashtableEntry),
+    "Sanity Check");
+  assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow");
+
+  // Fake the call stack for hashtable entry allocation
+  assert(NMT_TrackingStackDepth > 1, "At least one tracking stack");
+
+  // Create pseudo call stack for hashtable entry allocation
+  address pc[3];
+  if (NMT_TrackingStackDepth >= 3) {
+    pc[2] = (address)MallocSiteTable::allocation_at;
+  }
+  if (NMT_TrackingStackDepth >= 2) {
+    pc[1] = (address)MallocSiteTable::lookup_or_add;
+  }
+  pc[0] = (address)MallocSiteTable::new_entry;
+
+  // Instantiate the NativeCallStack object; we have to use the placement new operator (see comments above)
+  NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack)
+    NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));
+
+  // Instantiate hash entry for hashtable entry allocation callsite
+  MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site)
+    MallocSiteHashtableEntry(*stack);
+
+  // Add the allocation site to hashtable.
+  int index = hash_to_index(stack->hash());
+  _table[index] = entry;
+
+  return true;
+}
+
+// Walks entries in the hashtable.
+// It stops the walk if the walker returns false.
+bool MallocSiteTable::walk(MallocSiteWalker* walker) {
+  MallocSiteHashtableEntry* head;
+  for (int index = 0; index < table_size; index ++) {
+    head = _table[index];
+    while (head != NULL) {
+      if (!walker->do_malloc_site(head->peek())) {
+        return false;
+      }
+      head = (MallocSiteHashtableEntry*)head->next();
+    }
+  }
+  return true;
+}
+
+/*
+ *  The hashtable has no deletion policy for individual entries,
+ *  and each linked list node is inserted via compare-and-swap,
+ *  so each linked list is stable; contention only happens
+ *  at the end of a linked list.
+ *  This method should not return NULL under normal circumstances.
+ *  If NULL is returned, it indicates:
+ *    1. Out of memory: a new hash entry cannot be allocated.
+ *    2. Hash bucket overflow.
+ *  Under either of the above circumstances, the caller should handle the situation.
+ */
+MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
+  size_t* pos_idx) {
+  int index = hash_to_index(key.hash());
+  assert(index >= 0, "Negative index");
+  *bucket_idx = (size_t)index;
+  *pos_idx = 0;
+
+  // First entry for this hash bucket
+  if (_table[index] == NULL) {
+    MallocSiteHashtableEntry* entry = new_entry(key);
+    // OOM check
+    if (entry == NULL) return NULL;
+
+    // swap in the head
+    if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) {
+      return entry->data();
+    }
+
+    delete entry;
+  }
+
+  MallocSiteHashtableEntry* head = _table[index];
+  while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
+    MallocSite* site = head->data();
+    if (site->equals(key)) {
+      // found matched entry
+      return head->data();
+    }
+
+    if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
+      MallocSiteHashtableEntry* entry = new_entry(key);
+      // OOM check
+      if (entry == NULL) return NULL;
+      if (head->atomic_insert(entry)) {
+        (*pos_idx) ++;
+        return entry->data();
+      }
+      // contended, other thread won
+      delete entry;
+    }
+    head = (MallocSiteHashtableEntry*)head->next();
+    (*pos_idx) ++;
+  }
+  return NULL;
+}
+
+// Access malloc site
+MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) {
+  assert(bucket_idx < table_size, "Invalid bucket index");
+  MallocSiteHashtableEntry* head = _table[bucket_idx];
+  for (size_t index = 0; index < pos_idx && head != NULL;
+    index ++, head = (MallocSiteHashtableEntry*)head->next());
+  assert(head != NULL, "Invalid position index");
+  return head->data();
+}
+
+// Allocates MallocSiteHashtableEntry object. Special call stack
+// (pre-installed allocation site) has to be used to avoid infinite
+// recursion.
+MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key) {
+  void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
+    *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
+  return ::new (p) MallocSiteHashtableEntry(key);
+}
+
+void MallocSiteTable::reset() {
+  for (int index = 0; index < table_size; index ++) {
+    MallocSiteHashtableEntry* head = _table[index];
+    _table[index] = NULL;
+    delete_linked_list(head);
+  }
+}
+
+void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
+  MallocSiteHashtableEntry* p;
+  while (head != NULL) {
+    p = head;
+    head = (MallocSiteHashtableEntry*)head->next();
+    if (p != (MallocSiteHashtableEntry*)_hash_entry_allocation_site) {
+      delete p;
+    }
+  }
+}
+
+void MallocSiteTable::shutdown() {
+  AccessLock locker(&_access_count);
+  locker.exclusiveLock();
+  reset();
+}
+
+bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
+  assert(walker != NULL, "NULL walker");
+  AccessLock locker(&_access_count);
+  if (locker.sharedLock()) {
+    NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
+    return walk(walker);
+  }
+  return false;
+}
+
+
+void MallocSiteTable::AccessLock::exclusiveLock() {
+  jint target;
+  jint val;
+
+  assert(_lock_state != ExclusiveLock, "Can only call once");
+  assert(*_lock >= 0, "Cannot contend exclusive lock");
+
+  // make counter negative to block out shared locks
+  do {
+    val = *_lock;
+    target = _MAGIC_ + *_lock;
+  } while (Atomic::cmpxchg(target, _lock, val) != val);
+
+  // wait for all readers to exit
+  while (*_lock != _MAGIC_) {
+#ifdef _WINDOWS
+    os::naked_short_sleep(1);
+#else
+    os::naked_yield();
+#endif
+  }
+  _lock_state = ExclusiveLock;
+}
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/mallocSiteTable.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
+#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
+
+#if INCLUDE_NMT
+
+#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
+#include "services/allocationSite.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/nmtCommon.hpp"
+
+// MallocSite represents a code path that eventually calls
+// os::malloc() to allocate memory
+class MallocSite : public AllocationSite<MemoryCounter> {
+ public:
+  MallocSite() :
+    AllocationSite<MemoryCounter>(emptyStack) { }
+
+  MallocSite(const NativeCallStack& stack) :
+    AllocationSite<MemoryCounter>(stack) { }
+
+  void allocate(size_t size)      { data()->allocate(size);   }
+  void deallocate(size_t size)    { data()->deallocate(size); }
+
+  // Memory allocated from this code path
+  size_t size()  const { return peek()->size(); }
+  // The number of calls that were made
+  size_t count() const { return peek()->count(); }
+};
+
+// Malloc site hashtable entry
+class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
+ private:
+  MallocSite                _malloc_site;
+  MallocSiteHashtableEntry* _next;
+
+ public:
+  MallocSiteHashtableEntry() : _next(NULL) { }
+
+  MallocSiteHashtableEntry(NativeCallStack stack):
+    _malloc_site(stack), _next(NULL) { }
+
+  inline const MallocSiteHashtableEntry* next() const {
+    return _next;
+  }
+
+  // Insert an entry atomically.
+  // Return true if the entry is inserted successfully.
+  // The operation can fail due to contention from other threads.
+  bool atomic_insert(const MallocSiteHashtableEntry* entry) {
+    return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
+      NULL) == NULL);
+  }
+
+  void set_callsite(const MallocSite& site) {
+    _malloc_site = site;
+  }
+
+  inline const MallocSite* peek() const { return &_malloc_site; }
+  inline MallocSite* data()             { return &_malloc_site; }
+
+  inline long hash() const { return _malloc_site.hash(); }
+  inline bool equals(const NativeCallStack& stack) const {
+    return _malloc_site.equals(stack);
+  }
+  // Allocation/deallocation on this allocation site
+  inline void allocate(size_t size)   { _malloc_site.allocate(size);   }
+  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
+  // Memory counters
+  inline size_t size() const  { return _malloc_site.size();  }
+  inline size_t count() const { return _malloc_site.count(); }
+};
+
+// The walker walks every entry in the MallocSiteTable
+class MallocSiteWalker : public StackObj {
+ public:
+   virtual bool do_malloc_site(const MallocSite* e) { return false; }
+};
+
+/*
+ * Native memory tracking call site table.
+ * The table is only needed when detail tracking is enabled.
+ */
+class MallocSiteTable : AllStatic {
+ private:
+  // The number of hash buckets in this hashtable. The number should
+  // be tuned if malloc activity changes significantly.
+  // The statistics can be obtained via jcmd:
+  // jcmd <pid> VM.native_memory statistics.
+
+  // Currently, the (number of buckets / number of entries) ratio is
+  // about 1 : 6
+  enum {
+    table_base_size = 128,   // The base size is calculated from statistics to give
+                             // table ratio around 1:6
+    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
+  };
+
+
+  // This is a very special lock that allows multiple shared accesses (sharedLock), but
+  // once exclusive access (exclusiveLock) is requested, all shared accesses are
+  // rejected forever.
+  class AccessLock : public StackObj {
+    enum LockState {
+      NoLock,
+      SharedLock,
+      ExclusiveLock
+    };
+
+   private:
+    // A very large negative number. The only possibility to "overflow"
+    // this number is when there are more than -min_jint threads in
+    // this process, which is not going to happen in the foreseeable future.
+    const static int _MAGIC_ = min_jint;
+
+    LockState      _lock_state;
+    volatile int*  _lock;
+   public:
+    AccessLock(volatile int* lock) :
+      _lock_state(NoLock), _lock(lock) {
+    }
+
+    ~AccessLock() {
+      if (_lock_state == SharedLock) {
+        Atomic::dec((volatile jint*)_lock);
+      }
+    }
+    // Acquire shared lock.
+    // Return true if shared access is granted.
+    inline bool sharedLock() {
+      jint res = Atomic::add(1, _lock);
+      if (res < 0) {
+        Atomic::add(-1, _lock);
+        return false;
+      }
+      _lock_state = SharedLock;
+      return true;
+    }
+    // Acquire exclusive lock
+    void exclusiveLock();
+ };
+
+ public:
+  static bool initialize();
+  static void shutdown();
+
+  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })
+
+  // Number of hash buckets
+  static inline int hash_buckets()      { return (int)table_size; }
+
+  // Access and copy a call stack from this table. A shared lock should be
+  // acquired before accessing the entry.
+  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
+    size_t pos_idx) {
+    AccessLock locker(&_access_count);
+    if (locker.sharedLock()) {
+      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
+      MallocSite* site = malloc_site(bucket_idx, pos_idx);
+      if (site != NULL) {
+        stack = *site->call_stack();
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Record a new allocation from the specified call path.
+  // Return true if the allocation is recorded successfully; bucket_idx
+  // and pos_idx are also updated to indicate the entry where the allocation
+  // information was recorded.
+  // False is returned only under rare scenarios:
+  //  1. out of memory
+  //  2. hash bucket overflow
+  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
+    size_t* bucket_idx, size_t* pos_idx) {
+    AccessLock locker(&_access_count);
+    if (locker.sharedLock()) {
+      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
+      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx);
+      if (site != NULL) site->allocate(size);
+      return site != NULL;
+    }
+    return false;
+  }
+
+  // Record memory deallocation. bucket_idx and pos_idx indicate where the allocation
+  // information was recorded.
+  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
+    AccessLock locker(&_access_count);
+    if (locker.sharedLock()) {
+      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
+      MallocSite* site = malloc_site(bucket_idx, pos_idx);
+      if (site != NULL) {
+        site->deallocate(size);
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Walk this table.
+  static bool walk_malloc_site(MallocSiteWalker* walker);
+
+ private:
+  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
+  static void reset();
+
+  // Delete a bucket linked list
+  static void delete_linked_list(MallocSiteHashtableEntry* head);
+
+  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
+  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
+  static bool walk(MallocSiteWalker* walker);
+
+  static inline int hash_to_index(int  hash) {
+    hash = (hash > 0) ? hash : (-hash);
+    return (hash % table_size);
+  }
+
+  static inline const NativeCallStack* hash_entry_allocation_stack() {
+    return (NativeCallStack*)_hash_entry_allocation_stack;
+  }
+
+ private:
+  // Counter for counting concurrent access
+  static volatile int                _access_count;
+
+  // The callsite hashtable. It has to be a static table,
+  // since malloc calls can come from the C runtime linker.
+  static MallocSiteHashtableEntry*   _table[table_size];
+
+
+  // Reserve enough memory for placing the objects
+
+  // The memory for hashtable entry allocation stack object
+  static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
+  // The memory for hashtable entry allocation callsite object
+  static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
+  NOT_PRODUCT(static int     _peak_count;)
+};
+
+#endif // INCLUDE_NMT
+#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/mallocTracker.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "services/mallocSiteTable.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/mallocTracker.inline.hpp"
+#include "services/memTracker.hpp"
+
+size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
+
+// Total malloc'd memory amount
+size_t MallocMemorySnapshot::total() const {
+  size_t amount = 0;
+  for (int index = 0; index < mt_number_of_types; index ++) {
+    amount += _malloc[index].malloc_size();
+  }
+  amount += _tracking_header.size() + total_arena();
+  return amount;
+}
+
+// Total malloc'd memory used by arenas
+size_t MallocMemorySnapshot::total_arena() const {
+  size_t amount = 0;
+  for (int index = 0; index < mt_number_of_types; index ++) {
+    amount += _malloc[index].arena_size();
+  }
+  return amount;
+}
+
+
+void MallocMemorySnapshot::reset() {
+  _tracking_header.reset();
+  for (int index = 0; index < mt_number_of_types; index ++) {
+    _malloc[index].reset();
+  }
+}
+
+// Make adjustment by subtracting chunks used by arenas
+// from total chunks to get the total free chunk size
+void MallocMemorySnapshot::make_adjustment() {
+  size_t arena_size = total_arena();
+  int chunk_idx = NMTUtil::flag_to_index(mtChunk);
+  _malloc[chunk_idx].record_free(arena_size);
+}
+
+
+void MallocMemorySummary::initialize() {
+  assert(sizeof(_snapshot) >= sizeof(MallocMemorySnapshot), "Sanity Check");
+  // Uses placement new operator to initialize static area.
+  ::new ((void*)_snapshot)MallocMemorySnapshot();
+}
+
+void MallocHeader::release() const {
+  // Tracking has already shut down; no housekeeping is needed anymore
+  if (MemTracker::tracking_level() <= NMT_minimal) return;
+
+  MallocMemorySummary::record_free(size(), flags());
+  MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
+  if (tracking_level() == NMT_detail) {
+    MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
+  }
+}
+
+bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size,
+  size_t* bucket_idx, size_t* pos_idx) const {
+  bool ret = MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx);
+
+  // Something went wrong: it could be OOM or a malloc site table overflow.
+  // We want to keep tracking data even under OOM, so transition to
+  // summary tracking.
+  if (!ret) {
+    MemTracker::transition_to(NMT_summary);
+  }
+  return ret;
+}
+
+bool MallocHeader::get_stack(NativeCallStack& stack) const {
+  return MallocSiteTable::access_stack(stack, _bucket_idx, _pos_idx);
+}
+
+bool MallocTracker::initialize(NMT_TrackingLevel level) {
+  if (level >= NMT_summary) {
+    MallocMemorySummary::initialize();
+  }
+
+  if (level == NMT_detail) {
+    return MallocSiteTable::initialize();
+  }
+  return true;
+}
+
+bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
+  assert(from != NMT_off, "Can not transition from off state");
+  assert(to != NMT_off, "Can not transition to off state");
+  if (from == NMT_minimal) {
+    MallocMemorySummary::reset();
+  }
+
+  if (to == NMT_detail) {
+    assert(from == NMT_minimal || from == NMT_summary, "Just check");
+    return MallocSiteTable::initialize();
+  } else if (from == NMT_detail) {
+    assert(to == NMT_minimal || to == NMT_summary, "Just check");
+    MallocSiteTable::shutdown();
+  }
+  return true;
+}
+
+// Record a malloc memory allocation
+void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
+  const NativeCallStack& stack, NMT_TrackingLevel level) {
+  void*         memblock;      // the address for user data
+  MallocHeader* header = NULL;
+
+  if (malloc_base == NULL) {
+    return NULL;
+  }
+
+  // Check malloc size: size has to be <= MAX_MALLOC_SIZE. Exceeding it is only possible
+  // on 32-bit systems, with a malloc size >= 1GB, and it is safe to assume that won't happen.
+  if (size > MAX_MALLOC_SIZE) {
+    fatal("Should not use malloc for big memory block, use virtual memory instead");
+  }
+  // Uses placement global new operator to initialize malloc header
+  switch(level) {
+    case NMT_off:
+      return malloc_base;
+    case NMT_minimal: {
+      ::new (malloc_base) MallocHeader();
+      break;
+    }
+    case NMT_summary: {
+      header = ::new (malloc_base) MallocHeader(size, flags);
+      break;
+    }
+    case NMT_detail: {
+      header = ::new (malloc_base) MallocHeader(size, flags, stack);
+      break;
+    }
+    default:
+      ShouldNotReachHere();
+  }
+  memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
+
+  // The alignment check: 8-byte alignment for 32-bit systems,
+  //                      16-byte alignment for 64-bit systems.
+  assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
+
+  // Sanity check
+  assert(get_memory_tracking_level(memblock) == level,
+    "Wrong tracking level");
+
+#ifdef ASSERT
+  if (level > NMT_minimal) {
+    // Read back
+    assert(get_size(memblock) == size,   "Wrong size");
+    assert(get_flags(memblock) == flags, "Wrong flags");
+  }
+#endif
+
+  return memblock;
+}
+
+void* MallocTracker::record_free(void* memblock) {
+  // Never turned on
+  if (MemTracker::tracking_level() == NMT_off ||
+      memblock == NULL) {
+    return memblock;
+  }
+  MallocHeader* header = malloc_header(memblock);
+  header->release();
+
+  return (void*)header;
+}
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/mallocTracker.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,424 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
+#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
+
+#if INCLUDE_NMT
+
+#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
+#include "services/nmtCommon.hpp"
+#include "utilities/nativeCallStack.hpp"
+
+/*
+ * This counter class counts memory allocation and deallocation,
+ * records total memory allocation size and number of allocations.
+ * The counters are updated atomically.
+ */
+class MemoryCounter VALUE_OBJ_CLASS_SPEC {
+ private:
+  size_t   _count;
+  size_t   _size;
+
+  DEBUG_ONLY(size_t   _peak_count;)
+  DEBUG_ONLY(size_t   _peak_size; )
+
+ public:
+  MemoryCounter() : _count(0), _size(0) {
+    DEBUG_ONLY(_peak_count = 0;)
+    DEBUG_ONLY(_peak_size  = 0;)
+  }
+
+  // Reset counters
+  void reset() {
+    _size  = 0;
+    _count = 0;
+    DEBUG_ONLY(_peak_size = 0;)
+    DEBUG_ONLY(_peak_count = 0;)
+  }
+
+  inline void allocate(size_t sz) {
+    Atomic::add(1, (volatile MemoryCounterType*)&_count);
+    if (sz > 0) {
+      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
+      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
+    }
+    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
+  }
+
+  inline void deallocate(size_t sz) {
+    assert(_count > 0, "Negative counter");
+    assert(_size >= sz, "Negative size");
+    Atomic::add(-1, (volatile MemoryCounterType*)&_count);
+    if (sz > 0) {
+      Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
+    }
+  }
+
+  inline void resize(long sz) {
+    if (sz != 0) {
+      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
+      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
+    }
+  }
+
+  inline size_t count() const { return _count; }
+  inline size_t size()  const { return _size;  }
+  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
+  DEBUG_ONLY(inline size_t peak_size()  const { return _peak_size; })
+
+};
+
+/*
+ * Malloc memory used by a particular subsystem.
+ * It includes the memory acquired through os::malloc()
+ * call and arena's backing memory.
+ */
+class MallocMemory VALUE_OBJ_CLASS_SPEC {
+ private:
+  MemoryCounter _malloc;
+  MemoryCounter _arena;
+
+ public:
+  MallocMemory() { }
+
+  inline void record_malloc(size_t sz) {
+    _malloc.allocate(sz);
+  }
+
+  inline void record_free(size_t sz) {
+    _malloc.deallocate(sz);
+  }
+
+  inline void record_new_arena() {
+    _arena.allocate(0);
+  }
+
+  inline void record_arena_free() {
+    _arena.deallocate(0);
+  }
+
+  inline void record_arena_size_change(long sz) {
+    _arena.resize(sz);
+  }
+
+  void reset() {
+    _malloc.reset();
+    _arena.reset();
+  }
+
+  inline size_t malloc_size()  const { return _malloc.size(); }
+  inline size_t malloc_count() const { return _malloc.count();}
+  inline size_t arena_size()   const { return _arena.size();  }
+  inline size_t arena_count()  const { return _arena.count(); }
+
+  DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
+  DEBUG_ONLY(inline const MemoryCounter& arena_counter()  const { return _arena;  })
+};
+
+class MallocMemorySummary;
+
+// A snapshot of malloc'd memory, includes malloc memory
+// usage by types and memory used by tracking itself.
+class MallocMemorySnapshot : public ResourceObj {
+  friend class MallocMemorySummary;
+
+ private:
+  MallocMemory      _malloc[mt_number_of_types];
+  MemoryCounter     _tracking_header;
+
+
+ public:
+  inline MallocMemory*  by_type(MEMFLAGS flags) {
+    int index = NMTUtil::flag_to_index(flags);
+    return &_malloc[index];
+  }
+
+  inline MallocMemory* by_index(int index) {
+    assert(index >= 0, "Index out of bound");
+    assert(index < mt_number_of_types, "Index out of bound");
+    return &_malloc[index];
+  }
+
+  inline MemoryCounter* malloc_overhead() {
+    return &_tracking_header;
+  }
+
+  // Total malloc'd memory amount
+  size_t total() const;
+  // Total malloc'd memory used by arenas
+  size_t total_arena() const;
+
+  inline size_t thread_count() {
+    return by_type(mtThreadStack)->malloc_count();
+  }
+
+  void reset();
+
+  void copy_to(MallocMemorySnapshot* s) {
+    s->_tracking_header = _tracking_header;
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      s->_malloc[index] = _malloc[index];
+    }
+  }
+
+  // Make adjustment by subtracting chunks used by arenas
+  // from total chunks to get total free chunk size
+  void make_adjustment();
+};
+
+/*
+ * This class is for collecting malloc statistics at summary level
+ */
+class MallocMemorySummary : AllStatic {
+ private:
+  // Reserve memory for placement of MallocMemorySnapshot object
+  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
+
+ public:
+   static void initialize();
+
+   static inline void record_malloc(size_t size, MEMFLAGS flag) {
+     as_snapshot()->by_type(flag)->record_malloc(size);
+   }
+
+   static inline void record_free(size_t size, MEMFLAGS flag) {
+     as_snapshot()->by_type(flag)->record_free(size);
+   }
+
+   static inline void record_new_arena(MEMFLAGS flag) {
+     as_snapshot()->by_type(flag)->record_new_arena();
+   }
+
+   static inline void record_arena_free(MEMFLAGS flag) {
+     as_snapshot()->by_type(flag)->record_arena_free();
+   }
+
+   static inline void record_arena_size_change(long size, MEMFLAGS flag) {
+     as_snapshot()->by_type(flag)->record_arena_size_change(size);
+   }
+
+   static void snapshot(MallocMemorySnapshot* s) {
+     as_snapshot()->copy_to(s);
+     s->make_adjustment();
+   }
+
+   // Record memory used by malloc tracking header
+   static inline void record_new_malloc_header(size_t sz) {
+     as_snapshot()->malloc_overhead()->allocate(sz);
+   }
+
+   static inline void record_free_malloc_header(size_t sz) {
+     as_snapshot()->malloc_overhead()->deallocate(sz);
+   }
+
+   // The memory used by malloc tracking headers
+   static inline size_t tracking_overhead() {
+     return as_snapshot()->malloc_overhead()->size();
+   }
+
+   // Reset all counters to zero
+   static void reset() {
+     as_snapshot()->reset();
+   }
+
+  static MallocMemorySnapshot* as_snapshot() {
+    return (MallocMemorySnapshot*)_snapshot;
+  }
+};
+
+
+/*
+ * Malloc tracking header.
+ * To satisfy the malloc alignment requirement, NMT uses 2 machine words for its tracking header,
+ * which ensures 8-byte alignment on 32-bit systems and 16-byte alignment on 64-bit systems (product build).
+ */
+
+class MallocHeader VALUE_OBJ_CLASS_SPEC {
+#ifdef _LP64
+  size_t           _size      : 62;
+  size_t           _level     : 2;
+  size_t           _flags     : 8;
+  size_t           _pos_idx   : 16;
+  size_t           _bucket_idx: 40;
+#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
+#define MAX_BUCKET_LENGTH         ((size_t)(1 << 16))
+#define MAX_MALLOC_SIZE           (((size_t)1 << 62) - 1)
+#else
+  size_t           _size      : 30;
+  size_t           _level     : 2;
+  size_t           _flags     : 8;
+  size_t           _pos_idx   : 8;
+  size_t           _bucket_idx: 16;
+#define MAX_MALLOCSITE_TABLE_SIZE  ((size_t)(1 << 16))
+#define MAX_BUCKET_LENGTH          ((size_t)(1 << 8))
+// Max malloc size = 1GB - 1 on 32-bit systems, which have 4GB of total memory
+#define MAX_MALLOC_SIZE            ((size_t)(1 << 30) - 1)
+#endif  // _LP64
+
+ public:
+  // Summary tracking header
+  MallocHeader(size_t size, MEMFLAGS flags) {
+    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
+      "Wrong header size");
+
+    _level = NMT_summary;
+    _flags = flags;
+    set_size(size);
+    MallocMemorySummary::record_malloc(size, flags);
+    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
+  }
+  // Detail tracking header
+  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
+    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
+      "Wrong header size");
+
+    _level = NMT_detail;
+    _flags = flags;
+    set_size(size);
+    size_t bucket_idx;
+    size_t pos_idx;
+    if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
+      assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
+      assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
+      _bucket_idx = bucket_idx;
+      _pos_idx = pos_idx;
+    }
+    MallocMemorySummary::record_malloc(size, flags);
+    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
+  }
+  // Minimal tracking header
+  MallocHeader() {
+    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
+      "Wrong header size");
+
+    _level = (unsigned short)NMT_minimal;
+  }
+
+  inline NMT_TrackingLevel tracking_level() const {
+    return (NMT_TrackingLevel)_level;
+  }
+
+  inline size_t   size()  const { return _size; }
+  inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
+  bool get_stack(NativeCallStack& stack) const;
+
+  // Cleanup tracking information before the memory is released.
+  void release() const;
+
+ private:
+  inline void set_size(size_t size) {
+    assert(size <= MAX_MALLOC_SIZE, "Malloc size too large, should use virtual memory?");
+    _size = size;
+  }
+  bool record_malloc_site(const NativeCallStack& stack, size_t size,
+    size_t* bucket_idx, size_t* pos_idx) const;
+};
+
+
+// Main class called from MemTracker to track malloc activities
+class MallocTracker : AllStatic {
+ public:
+  // Initialize malloc tracker for specific tracking level
+  static bool initialize(NMT_TrackingLevel level);
+
+  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
+
+  // malloc tracking header size for specific tracking level
+  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
+    return (level == NMT_off) ? 0 : sizeof(MallocHeader);
+  }
+
+  // Parameter name convention:
+  // memblock :   the beginning address for user data
+  // malloc_base: the beginning address that includes malloc tracking header
+  //
+  // The relationship:
+  // memblock = (char*)malloc_base + sizeof(nmt header)
+  //
+
+  // Record malloc on the specified memory block
+  static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
+    const NativeCallStack& stack, NMT_TrackingLevel level);
+
+  // Record free on specified memory block
+  static void* record_free(void* memblock);
+
+  // Get tracking level of specified memory block
+  static inline NMT_TrackingLevel get_memory_tracking_level(void* memblock);
+
+
+  // Offset memory address to header address
+  static inline void* get_base(void* memblock);
+  static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
+    if (memblock == NULL || level == NMT_off) return memblock;
+    return (char*)memblock - malloc_header_size(level);
+  }
+
+  // Get memory size
+  static inline size_t get_size(void* memblock) {
+    MallocHeader* header = malloc_header(memblock);
+    assert(header->tracking_level() >= NMT_summary,
+      "Wrong tracking level");
+    return header->size();
+  }
+
+  // Get memory type
+  static inline MEMFLAGS get_flags(void* memblock) {
+    MallocHeader* header = malloc_header(memblock);
+    assert(header->tracking_level() >= NMT_summary,
+      "Wrong tracking level");
+    return header->flags();
+  }
+
+  // Get header size
+  static inline size_t get_header_size(void* memblock) {
+    return (memblock == NULL) ? 0 : sizeof(MallocHeader);
+  }
+
+  static inline void record_new_arena(MEMFLAGS flags) {
+    MallocMemorySummary::record_new_arena(flags);
+  }
+
+  static inline void record_arena_free(MEMFLAGS flags) {
+    MallocMemorySummary::record_arena_free(flags);
+  }
+
+  static inline void record_arena_size_change(int size, MEMFLAGS flags) {
+    MallocMemorySummary::record_arena_size_change(size, flags);
+  }
+ private:
+  static inline MallocHeader* malloc_header(void *memblock) {
+    assert(memblock != NULL, "NULL pointer");
+    MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
+    assert(header->tracking_level() >= NMT_minimal, "Bad header");
+    return header;
+  }
+};
+
+#endif // INCLUDE_NMT
+
+
+#endif //SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/mallocTracker.inline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
+#define SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
+
+#include "services/mallocTracker.hpp"
+#include "services/memTracker.hpp"
+
+inline NMT_TrackingLevel MallocTracker::get_memory_tracking_level(void* memblock) {
+  assert(memblock != NULL, "Sanity check");
+  if (MemTracker::tracking_level() == NMT_off) return NMT_off;
+  MallocHeader* header = malloc_header(memblock);
+  return header->tracking_level();
+}
+
+inline void* MallocTracker::get_base(void* memblock) {
+  return get_base(memblock, MemTracker::tracking_level());
+}
+
+#endif // SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
+
--- a/hotspot/src/share/vm/services/management.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/services/management.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1696,6 +1696,9 @@
   } else if (flag->is_uint64_t()) {
     global->value.j = (jlong)flag->get_uint64_t();
     global->type = JMM_VMGLOBAL_TYPE_JLONG;
+  } else if (flag->is_size_t()) {
+    global->value.j = (jlong)flag->get_size_t();
+    global->type = JMM_VMGLOBAL_TYPE_JLONG;
   } else if (flag->is_ccstr()) {
     Handle str = java_lang_String::create_from_str(flag->get_ccstr(), CHECK_false);
     global->value.l = (jobject)JNIHandles::make_local(env, str());
@@ -1851,6 +1854,9 @@
   } else if (flag->is_uint64_t()) {
     uint64_t uvalue = (uint64_t)new_value.j;
     succeed = CommandLineFlags::uint64_tAtPut(name, &uvalue, Flag::MANAGEMENT);
+  } else if (flag->is_size_t()) {
+    size_t svalue = (size_t)new_value.j;
+    succeed = CommandLineFlags::size_tAtPut(name, &svalue, Flag::MANAGEMENT);
   } else if (flag->is_ccstr()) {
     oop str = JNIHandles::resolve_external_guard(new_value.l);
     if (str == NULL) {
@@ -1914,7 +1920,7 @@
   ResourceMark rm(THREAD); // thread->name() uses ResourceArea
 
   assert(thread->name() != NULL, "All threads should have a name");
-  _names_chars[_count] = strdup(thread->name());
+  _names_chars[_count] = os::strdup(thread->name());
   _times->long_at_put(_count, os::is_thread_cpu_time_supported() ?
                         os::thread_cpu_time(thread) : -1);
   _count++;
@@ -1932,7 +1938,7 @@
 
 ThreadTimesClosure::~ThreadTimesClosure() {
   for (int i = 0; i < _count; i++) {
-    free(_names_chars[i]);
+    os::free(_names_chars[i]);
   }
   FREE_C_HEAP_ARRAY(char *, _names_chars, mtInternal);
 }
--- a/hotspot/src/share/vm/services/memBaseline.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/services/memBaseline.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,471 +22,301 @@
  *
  */
 #include "precompiled.hpp"
+
 #include "memory/allocation.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
 #include "services/memBaseline.hpp"
 #include "services/memTracker.hpp"
 
-
-MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
-  {mtJavaHeap,   "Java Heap"},
-  {mtClass,      "Class"},
-  {mtThreadStack,"Thread Stack"},
-  {mtThread,     "Thread"},
-  {mtCode,       "Code"},
-  {mtGC,         "GC"},
-  {mtCompiler,   "Compiler"},
-  {mtInternal,   "Internal"},
-  {mtOther,      "Other"},
-  {mtSymbol,     "Symbol"},
-  {mtNMT,        "Memory Tracking"},
-  {mtTracing,    "Tracing"},
-  {mtChunk,      "Pooled Free Chunks"},
-  {mtClassShared,"Shared spaces for classes"},
-  {mtTest,       "Test"},
-  {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
-                             // behind
-};
-
-MemBaseline::MemBaseline() {
-  _baselined = false;
-
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
-    _vm_data[index].set_type(MemType2NameMap[index]._flag);
-    _arena_data[index].set_type(MemType2NameMap[index]._flag);
+/*
+ * Sizes are sorted in descending order for reporting
+ */
+int compare_malloc_size(const MallocSite& s1, const MallocSite& s2) {
+  if (s1.size() == s2.size()) {
+    return 0;
+  } else if (s1.size() > s2.size()) {
+    return -1;
+  } else {
+    return 1;
   }
-
-  _malloc_cs = NULL;
-  _vm_cs = NULL;
-  _vm_map = NULL;
-
-  _number_of_classes = 0;
-  _number_of_threads = 0;
 }
 
 
-void MemBaseline::clear() {
-  if (_malloc_cs != NULL) {
-    delete _malloc_cs;
-    _malloc_cs = NULL;
+int compare_virtual_memory_size(const VirtualMemoryAllocationSite& s1,
+  const VirtualMemoryAllocationSite& s2) {
+  if (s1.reserved() == s2.reserved()) {
+    return 0;
+  } else if (s1.reserved() > s2.reserved()) {
+    return -1;
+  } else {
+    return 1;
   }
+}
 
-  if (_vm_cs != NULL) {
-    delete _vm_cs;
-    _vm_cs = NULL;
-  }
-
-  if (_vm_map != NULL) {
-    delete _vm_map;
-    _vm_map = NULL;
-  }
-
-  reset();
+// Sort into allocation site address order for baseline comparison
+int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) {
+  return s1.call_stack()->compare(*s2.call_stack());
 }
 
 
-void MemBaseline::reset() {
-  _baselined = false;
-  _total_vm_reserved = 0;
-  _total_vm_committed = 0;
-  _total_malloced = 0;
-  _number_of_classes = 0;
-
-  if (_malloc_cs != NULL) _malloc_cs->clear();
-  if (_vm_cs != NULL) _vm_cs->clear();
-  if (_vm_map != NULL) _vm_map->clear();
-
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    _malloc_data[index].clear();
-    _vm_data[index].clear();
-    _arena_data[index].clear();
-  }
+int compare_virtual_memory_site(const VirtualMemoryAllocationSite& s1,
+  const VirtualMemoryAllocationSite& s2) {
+  return s1.call_stack()->compare(*s2.call_stack());
 }
 
-MemBaseline::~MemBaseline() {
-  clear();
+/*
+ * Walker to walk malloc allocation site table
+ */
+class MallocAllocationSiteWalker : public MallocSiteWalker {
+ private:
+  SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
+                 _malloc_sites;
+  size_t         _count;
+
+  // Entries in MallocSiteTable with size == 0 and count == 0
+  // indicate that the malloc site is no longer there.
+ public:
+  MallocAllocationSiteWalker(Arena* arena) : _malloc_sites(arena), _count(0) {
+  }
+
+  inline size_t count() const { return _count; }
+
+  LinkedList<MallocSite>* malloc_sites() {
+    return &_malloc_sites;
+  }
+
+  bool do_malloc_site(const MallocSite* site) {
+    if (site->size() >= MemBaseline::SIZE_THRESHOLD) {
+      if (_malloc_sites.add(*site) != NULL) {
+        _count++;
+        return true;
+      } else {
+        return false;  // OOM
+      }
+    } else {
+      // malloc site does not meet threshold, ignore and continue
+      return true;
+    }
+  }
+};
+
+// Compare virtual memory region's base address
+int compare_virtual_memory_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
+  return r1.compare(r2);
 }
 
-// baseline malloc'd memory records, generate overall summary and summaries by
-// memory types
-bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
-  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
-  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
-  size_t used_arena_size = 0;
-  int index;
-  while (malloc_ptr != NULL) {
-    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
-    size_t size = malloc_ptr->size();
-    if (malloc_ptr->is_arena_memory_record()) {
-      // We do have anonymous arenas, they are either used as value objects,
-      // which are embedded inside other objects, or used as stack objects.
-      _arena_data[index].inc(size);
-      used_arena_size += size;
-    } else {
-      _total_malloced += size;
-      _malloc_data[index].inc(size);
-      if (malloc_ptr->is_arena_record()) {
-        // see if arena memory record present
-        MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
-        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
-          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
-             "Arena records do not match");
-          size = next_malloc_ptr->size();
-          _arena_data[index].inc(size);
-          used_arena_size += size;
-          malloc_itr.next();
-        }
+// Walk all virtual memory regions for baselining
+class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
+ private:
+  SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base, ResourceObj::ARENA>
+                _virtual_memory_regions;
+  size_t        _count;
+
+ public:
+  VirtualMemoryAllocationWalker(Arena* a) : _count(0), _virtual_memory_regions(a) {
+  }
+
+  bool do_allocation_site(const ReservedMemoryRegion* rgn)  {
+    if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) {
+      if (_virtual_memory_regions.add(*rgn) != NULL) {
+        _count ++;
+        return true;
+      } else {
+        return false;
       }
     }
-    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
+    return true;
   }
 
-  // substract used arena size to get size of arena chunk in free list
-  index = flag2index(mtChunk);
-  _malloc_data[index].reduce(used_arena_size);
-  // we really don't know how many chunks in free list, so just set to
-  // 0
-  _malloc_data[index].overwrite_counter(0);
+  LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
+    return &_virtual_memory_regions;
+  }
+};
+
+
+bool MemBaseline::baseline_summary() {
+  assert(_malloc_memory_snapshot == NULL, "Malloc baseline not yet reset");
+  assert(_virtual_memory_snapshot == NULL, "Virtual baseline not yet reset");
+
+  _malloc_memory_snapshot =  new (arena()) MallocMemorySnapshot();
+  _virtual_memory_snapshot = new (arena()) VirtualMemorySnapshot();
+  if (_malloc_memory_snapshot == NULL || _virtual_memory_snapshot == NULL) {
+    return false;
+  }
+  MallocMemorySummary::snapshot(_malloc_memory_snapshot);
+  VirtualMemorySummary::snapshot(_virtual_memory_snapshot);
+  return true;
+}
+
+bool MemBaseline::baseline_allocation_sites() {
+  assert(arena() != NULL, "Just check");
+  // Malloc allocation sites
+  MallocAllocationSiteWalker malloc_walker(arena());
+  if (!MallocSiteTable::walk_malloc_site(&malloc_walker)) {
+    return false;
+  }
+
+  _malloc_sites.set_head(malloc_walker.malloc_sites()->head());
+  // The malloc sites are collected in size order
+  _malloc_sites_order = by_size;
+
+  // Virtual memory allocation sites
+  VirtualMemoryAllocationWalker virtual_memory_walker(arena());
+  if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) {
+    return false;
+  }
+
+  // Virtual memory allocations are collected in call stack order
+  _virtual_memory_allocations.set_head(virtual_memory_walker.virtual_memory_allocations()->head());
+
+  if (!aggregate_virtual_memory_allocation_sites()) {
+    return false;
+  }
+  // Virtual memory allocation sites are aggregated in call stack order
+  _virtual_memory_sites_order = by_address;
 
   return true;
 }
 
-// check if there is a safepoint in progress, if so, block the thread
-// for the safepoint
-void MemBaseline::check_safepoint(JavaThread* thr) {
-  if (SafepointSynchronize::is_synchronizing()) {
-    // grab and drop the SR_lock to honor the safepoint protocol
-    MutexLocker ml(thr->SR_lock());
-  }
-}
-
-// baseline mmap'd memory records, generate overall summary and summaries by
-// memory types
-bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
-  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
-  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
-  int index;
-  while (vm_ptr != NULL) {
-    if (vm_ptr->is_reserved_region()) {
-      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
-    // we use the number of thread stack to count threads
-      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
-      _number_of_threads ++;
-    }
-      _total_vm_reserved += vm_ptr->size();
-      _vm_data[index].inc(vm_ptr->size(), 0);
-    } else {
-      _total_vm_committed += vm_ptr->size();
-      _vm_data[index].inc(0, vm_ptr->size());
-    }
-    vm_ptr = (VMMemRegion*)vm_itr.next();
-  }
-  return true;
-}
-
-// baseline malloc'd memory by callsites, but only the callsites with memory allocation
-// over 1KB are stored.
-bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
-  assert(MemTracker::track_callsite(), "detail tracking is off");
-
-  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
-  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
-  MallocCallsitePointer malloc_callsite;
-
-  // initailize malloc callsite array
-  if (_malloc_cs == NULL) {
-    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
-    // out of native memory
-    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
-      return false;
-    }
-  } else {
-    _malloc_cs->clear();
-  }
-
-  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);
-
-  // sort into callsite pc order. Details are aggregated by callsites
-  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
-  bool ret = true;
-
-  // baseline memory that is totaled over 1 KB
-  while (malloc_ptr != NULL) {
-    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
-      // skip thread stacks
-      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
-        if (malloc_callsite.addr() != malloc_ptr->pc()) {
-          if ((malloc_callsite.amount()/K) > 0) {
-            if (!_malloc_cs->append(&malloc_callsite)) {
-              ret = false;
-              break;
-            }
-          }
-          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
-        }
-        malloc_callsite.inc(malloc_ptr->size());
-      }
-    }
-    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
+bool MemBaseline::baseline(bool summaryOnly) {
+  if (arena() == NULL) {
+    _arena = new (std::nothrow, mtNMT) Arena(mtNMT);
+    if (arena() == NULL) return false;
   }
 
-  // restore to address order. Snapshot malloc data is maintained in memory
-  // address order.
-  malloc_data->sort((FN_SORT)malloc_sort_by_addr);
-
-  if (!ret) {
-              return false;
-            }
-  // deal with last record
-  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
-    if (!_malloc_cs->append(&malloc_callsite)) {
-      return false;
-    }
-  }
-  return true;
-}
-
-// baseline mmap'd memory by callsites
-bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
-  assert(MemTracker::track_callsite(), "detail tracking is off");
-
-  VMCallsitePointer  vm_callsite;
-  VMCallsitePointer* cur_callsite = NULL;
-  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
-  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();
-
-  // initialize virtual memory map array
-  if (_vm_map == NULL) {
-    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
-   if (_vm_map == NULL || _vm_map->out_of_memory()) {
-     return false;
-   }
-  } else {
-    _vm_map->clear();
-  }
-
-  // initialize virtual memory callsite array
-  if (_vm_cs == NULL) {
-    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
-    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
-      return false;
-    }
-  } else {
-    _vm_cs->clear();
-  }
-
-  // consolidate virtual memory data
-  VMMemRegionEx*     reserved_rec = NULL;
-  VMMemRegionEx*     committed_rec = NULL;
+  reset();
 
-  // vm_ptr is coming in increasing base address order
-  while (vm_ptr != NULL) {
-    if (vm_ptr->is_reserved_region()) {
-      // consolidate reserved memory regions for virtual memory map.
-      // The criteria for consolidation is:
-      // 1. two adjacent reserved memory regions
-      // 2. belong to the same memory type
-      // 3. reserved from the same callsite
-      if (reserved_rec == NULL ||
-        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
-        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
-        reserved_rec->pc() != vm_ptr->pc()) {
-        if (!_vm_map->append(vm_ptr)) {
-        return false;
-      }
-        // inserted reserved region, we need the pointer to the element in virtual
-        // memory map array.
-        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
-      } else {
-        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
-    }
+  _class_count = InstanceKlass::number_of_instance_classes();
 
-      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
-      return false;
-    }
-      vm_callsite = VMCallsitePointer(vm_ptr->pc());
-      cur_callsite = &vm_callsite;
-      vm_callsite.inc(vm_ptr->size(), 0);
-    } else {
-      // consolidate committed memory regions for virtual memory map
-      // The criterial is:
-      // 1. two adjacent committed memory regions
-      // 2. committed from the same callsite
-      if (committed_rec == NULL ||
-        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
-        committed_rec->pc() != vm_ptr->pc()) {
-        if (!_vm_map->append(vm_ptr)) {
-          return false;
-        }
-        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
-    } else {
-        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
-      }
-      vm_callsite.inc(0, vm_ptr->size());
-    }
-    vm_ptr = (VMMemRegionEx*)vm_itr.next();
-  }
-  // deal with last record
-  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
+  if (!baseline_summary()) {
     return false;
   }
 
-  // sort it into callsite pc order. Details are aggregated by callsites
-  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);
+  _baseline_type = Summary_baselined;
 
-  // walk the array to consolidate record by pc
-  MemPointerArrayIteratorImpl itr(_vm_cs);
-  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
-  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
-  while (next_rec != NULL) {
-    assert(callsite_rec != NULL, "Sanity check");
-    if (next_rec->addr() == callsite_rec->addr()) {
-      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
-      itr.remove();
-      next_rec = (VMCallsitePointer*)itr.current();
-    } else {
-      callsite_rec = next_rec;
-      next_rec = (VMCallsitePointer*)itr.next();
-    }
+  // baseline details
+  if (!summaryOnly &&
+      MemTracker::tracking_level() == NMT_detail) {
+    baseline_allocation_sites();
+    _baseline_type = Detail_baselined;
   }
 
   return true;
 }
 
-// baseline a snapshot. If summary_only = false, memory usages aggregated by
-// callsites are also baselined.
-// The method call can be lengthy, especially when detail tracking info is
-// requested. So the method checks for safepoint explicitly.
-bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
-  Thread* THREAD = Thread::current();
-  assert(THREAD->is_Java_thread(), "must be a JavaThread");
-  MutexLocker snapshot_locker(snapshot._lock);
-  reset();
-  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
-  if (_baselined) {
-    check_safepoint((JavaThread*)THREAD);
-    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
+int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
+  const VirtualMemoryAllocationSite& s2) {
+  return s1.call_stack()->compare(*s2.call_stack());
+}
+
+bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
+  SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site, ResourceObj::ARENA>
+    allocation_sites(arena());
+
+  VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
+  const ReservedMemoryRegion* rgn;
+  VirtualMemoryAllocationSite* site;
+  while ((rgn = itr.next()) != NULL) {
+    VirtualMemoryAllocationSite tmp(*rgn->call_stack());
+    site = allocation_sites.find(tmp);
+    if (site == NULL) {
+      LinkedListNode<VirtualMemoryAllocationSite>* node =
+        allocation_sites.add(tmp);
+      if (node == NULL) return false;
+      site = node->data();
+    }
+    site->reserve_memory(rgn->size());
+    site->commit_memory(rgn->committed_size());
   }
-  _number_of_classes = snapshot.number_of_classes();
 
-  if (!summary_only && MemTracker::track_callsite() && _baselined) {
-    check_safepoint((JavaThread*)THREAD);
-    _baselined =  baseline_malloc_details(snapshot._alloc_ptrs);
-    if (_baselined) {
-      check_safepoint((JavaThread*)THREAD);
-      _baselined =  baseline_vm_details(snapshot._vm_ptrs);
-    }
-  }
-  return _baselined;
+  _virtual_memory_sites.set_head(allocation_sites.head());
+  return true;
 }
 
-
-int MemBaseline::flag2index(MEMFLAGS flag) const {
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    if (MemType2NameMap[index]._flag == flag) {
-      return index;
-    }
+MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) {
+  assert(!_malloc_sites.is_empty(), "Detail baseline?");
+  switch(order) {
+    case by_size:
+      malloc_sites_to_size_order();
+      break;
+    case by_site:
+      malloc_sites_to_allocation_site_order();
+      break;
+    case by_address:
+    default:
+      ShouldNotReachHere();
   }
-  assert(false, "no type");
-  return -1;
+  return MallocSiteIterator(_malloc_sites.head());
 }
 
-const char* MemBaseline::type2name(MEMFLAGS type) {
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    if (MemType2NameMap[index]._flag == type) {
-      return MemType2NameMap[index]._name;
-    }
+VirtualMemorySiteIterator MemBaseline::virtual_memory_sites(SortingOrder order) {
+  assert(!_virtual_memory_sites.is_empty(), "Detail baseline?");
+  switch(order) {
+    case by_size:
+      virtual_memory_sites_to_size_order();
+      break;
+    case by_site:
+      virtual_memory_sites_to_reservation_site_order();
+      break;
+    case by_address:
+    default:
+      ShouldNotReachHere();
   }
-  assert(false, err_msg("bad type %x", type));
-  return NULL;
+  return VirtualMemorySiteIterator(_virtual_memory_sites.head());
 }
 
 
-MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
-  _total_malloced = other._total_malloced;
-  _total_vm_reserved = other._total_vm_reserved;
-  _total_vm_committed = other._total_vm_committed;
-
-  _baselined = other._baselined;
-  _number_of_classes = other._number_of_classes;
-
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    _malloc_data[index] = other._malloc_data[index];
-    _vm_data[index] = other._vm_data[index];
-    _arena_data[index] = other._arena_data[index];
-  }
+// Sorting allocation sites in different orders
+void MemBaseline::malloc_sites_to_size_order() {
+  if (_malloc_sites_order != by_size) {
+    SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
+      tmp(arena());
 
-  if (MemTracker::track_callsite()) {
-    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
-    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
-           "not properly baselined");
-    _malloc_cs->clear();
-    _vm_cs->clear();
-    int index;
-    for (index = 0; index < other._malloc_cs->length(); index ++) {
-      _malloc_cs->append(other._malloc_cs->at(index));
-    }
-
-    for (index = 0; index < other._vm_cs->length(); index ++) {
-      _vm_cs->append(other._vm_cs->at(index));
-    }
+    // Add malloc sites to sorted linked list to sort into size order
+    tmp.move(&_malloc_sites);
+    _malloc_sites.set_head(tmp.head());
+    tmp.set_head(NULL);
+    _malloc_sites_order = by_size;
   }
-  return *this;
 }
 
-/* compare functions for sorting */
-
-// sort snapshot malloc'd records in callsite pc order
-int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
-  assert(MemTracker::track_callsite(),"Just check");
-  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
-  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
-  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
+void MemBaseline::malloc_sites_to_allocation_site_order() {
+  if (_malloc_sites_order != by_site) {
+    SortedLinkedList<MallocSite, compare_malloc_site, ResourceObj::ARENA>
+      tmp(arena());
+    // Add malloc sites to sorted linked list to sort into site (address) order
+    tmp.move(&_malloc_sites);
+    _malloc_sites.set_head(tmp.head());
+    tmp.set_head(NULL);
+    _malloc_sites_order = by_site;
+  }
 }
 
-// sort baselined malloc'd records in size order
-int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
-  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
-  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
-}
+void MemBaseline::virtual_memory_sites_to_size_order() {
+  if (_virtual_memory_sites_order != by_size) {
+    SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size, ResourceObj::ARENA>
+      tmp(arena());
 
-// sort baselined malloc'd records in callsite pc order
-int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
-  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
-  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
+    tmp.move(&_virtual_memory_sites);
+
+    _virtual_memory_sites.set_head(tmp.head());
+    tmp.set_head(NULL);
+    _virtual_memory_sites_order = by_size;
+  }
 }
 
+void MemBaseline::virtual_memory_sites_to_reservation_site_order() {
+  if (_virtual_memory_sites_order != by_site) {
+    SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site, ResourceObj::ARENA>
+      tmp(arena());
 
-// sort baselined mmap'd records in size (reserved size) order
-int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
-  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
-  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
+    tmp.move(&_virtual_memory_sites);
+
+    _virtual_memory_sites.set_head(tmp.head());
+    tmp.set_head(NULL);
+
+    _virtual_memory_sites_order = by_site;
+  }
 }
 
-// sort baselined mmap'd records in callsite pc order
-int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
-  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
-  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
-}
-
-
-// sort snapshot malloc'd records in memory block address order
-int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
-  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
-  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
-  assert(p1 == p2 || delta != 0, "dup pointer");
-  return delta;
-}
-
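
The rewritten baseline above drops the sorted-array consolidation in favor of
folding reserved regions into per-call-site records via a SortedLinkedList.
A minimal standalone sketch of that fold, using illustrative stand-in types
(std::map replaces the arena-backed SortedLinkedList; none of these names are
HotSpot APIs):

    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <vector>

    // Stand-ins for ReservedMemoryRegion / VirtualMemoryAllocationSite.
    struct Region { uintptr_t stack_id; size_t reserved; size_t committed; };
    struct Site   { size_t reserved; size_t committed; };

    // Find-or-insert per call stack, then accumulate sizes: the same
    // shape as aggregate_virtual_memory_allocation_sites() above.
    std::map<uintptr_t, Site> aggregate(const std::vector<Region>& regions) {
      std::map<uintptr_t, Site> sites;
      for (const Region& r : regions) {
        Site& s = sites[r.stack_id];   // value-initialized on first insert
        s.reserved  += r.reserved;
        s.committed += r.committed;
      }
      return sites;
    }
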
--- a/hotspot/src/share/vm/services/memBaseline.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/services/memBaseline.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,425 +25,205 @@
 #ifndef SHARE_VM_SERVICES_MEM_BASELINE_HPP
 #define SHARE_VM_SERVICES_MEM_BASELINE_HPP
 
+#if INCLUDE_NMT
+
 #include "memory/allocation.hpp"
 #include "runtime/mutex.hpp"
-#include "services/memPtr.hpp"
-#include "services/memSnapshot.hpp"
+#include "services/mallocSiteTable.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/nmtCommon.hpp"
+#include "services/virtualMemoryTracker.hpp"
+#include "utilities/linkedlist.hpp"
 
-// compare unsigned number
-#define UNSIGNED_COMPARE(a, b)  ((a > b) ? 1 : ((a == b) ? 0 : -1))
+typedef LinkedListIterator<MallocSite>                   MallocSiteIterator;
+typedef LinkedListIterator<VirtualMemoryAllocationSite>  VirtualMemorySiteIterator;
+typedef LinkedListIterator<ReservedMemoryRegion>         VirtualMemoryAllocationIterator;
 
 /*
- * MallocCallsitePointer and VMCallsitePointer are used
- * to baseline memory blocks with their callsite information.
- * They are only available when detail tracking is turned
- * on.
+ * Baseline a memory snapshot
  */
-
-/* baselined malloc record aggregated by callsite */
-class MallocCallsitePointer : public MemPointer {
- private:
-  size_t    _count;   // number of malloc invocation from this callsite
-  size_t    _amount;  // total amount of memory malloc-ed from this callsite
-
+class MemBaseline VALUE_OBJ_CLASS_SPEC {
  public:
-  MallocCallsitePointer() {
-    _count = 0;
-    _amount = 0;
-  }
-
-  MallocCallsitePointer(address pc) : MemPointer(pc) {
-    _count = 0;
-    _amount = 0;
-  }
+  enum BaselineThreshold {
+    SIZE_THRESHOLD = K        // Only allocation sites with size over this threshold are baselined.
+  };
 
-  MallocCallsitePointer& operator=(const MallocCallsitePointer& p) {
-    MemPointer::operator=(p);
-    _count = p.count();
-    _amount = p.amount();
-    return *this;
-  }
+  enum BaselineType {
+    Not_baselined,
+    Summary_baselined,
+    Detail_baselined
+  };
 
-  inline void inc(size_t size) {
-    _count ++;
-    _amount += size;
+  enum SortingOrder {
+    by_address,   // by memory address
+    by_size,      // by memory size
+    by_site       // by call site where the memory is allocated from
   };
 
-  inline size_t count() const {
-    return _count;
-  }
+ private:
+  // All baseline data is stored in this arena
+  Arena*                  _arena;
+
+  // Summary information
+  MallocMemorySnapshot*   _malloc_memory_snapshot;
+  VirtualMemorySnapshot*  _virtual_memory_snapshot;
+
+  size_t               _class_count;
 
-  inline size_t amount() const {
-    return _amount;
-  }
-};
+  // Allocation sites information
+  // Malloc allocation sites
+  LinkedListImpl<MallocSite, ResourceObj::ARENA>
+                       _malloc_sites;
+
+  // All virtual memory allocations
+  LinkedListImpl<ReservedMemoryRegion, ResourceObj::ARENA>
+                       _virtual_memory_allocations;
 
-// baselined virtual memory record aggregated by callsite
-class VMCallsitePointer : public MemPointer {
- private:
-  size_t     _count;              // number of invocation from this callsite
-  size_t     _reserved_amount;    // total reserved amount
-  size_t     _committed_amount;   // total committed amount
+  // Virtual memory allocations by allocation sites, always in by_address
+  // order
+  LinkedListImpl<VirtualMemoryAllocationSite, ResourceObj::ARENA>
+                       _virtual_memory_sites;
+
+  SortingOrder         _malloc_sites_order;
+  SortingOrder         _virtual_memory_sites_order;
+
+  BaselineType         _baseline_type;
 
  public:
-  VMCallsitePointer() {
-    _count = 0;
-    _reserved_amount = 0;
-    _committed_amount = 0;
-  }
-
-  VMCallsitePointer(address pc) : MemPointer(pc) {
-    _count = 0;
-    _reserved_amount = 0;
-    _committed_amount = 0;
-  }
-
-  VMCallsitePointer& operator=(const VMCallsitePointer& p) {
-    MemPointer::operator=(p);
-    _count = p.count();
-    _reserved_amount = p.reserved_amount();
-    _committed_amount = p.committed_amount();
-    return *this;
-  }
-
-  inline void inc(size_t reserved, size_t committed) {
-    _count ++;
-    _reserved_amount += reserved;
-    _committed_amount += committed;
-  }
-
-  inline size_t count() const {
-    return _count;
-  }
-
-  inline size_t reserved_amount() const {
-    return _reserved_amount;
+  // create a memory baseline
+  MemBaseline():
+    _arena(NULL),
+    _malloc_memory_snapshot(NULL),
+    _virtual_memory_snapshot(NULL),
+    _class_count(0),
+    _malloc_sites(NULL),
+    _baseline_type(Not_baselined) {
   }
 
-  inline size_t committed_amount() const {
-    return _committed_amount;
-  }
-};
-
-// maps a memory type flag to readable name
-typedef struct _memType2Name {
-  MEMFLAGS     _flag;
-  const char*  _name;
-} MemType2Name;
-
-
-// This class aggregates malloc'd records by memory type
-class MallocMem VALUE_OBJ_CLASS_SPEC {
- private:
-  MEMFLAGS       _type;
-
-  size_t         _count;
-  size_t         _amount;
-
- public:
-  MallocMem() {
-    _type = mtNone;
-    _count = 0;
-    _amount = 0;
+  ~MemBaseline() {
+    reset();
+    if (_arena != NULL) {
+      delete _arena;
+    }
   }
 
-  MallocMem(MEMFLAGS flags) {
-    assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
-    _type = FLAGS_TO_MEMORY_TYPE(flags);
-    _count = 0;
-    _amount = 0;
-  }
+  bool baseline(bool summaryOnly = true);
 
-  inline void set_type(MEMFLAGS flag) {
-    _type = flag;
-  }
+  BaselineType baseline_type() const { return _baseline_type; }
 
-  inline void clear() {
-    _count = 0;
-    _amount = 0;
-    _type = mtNone;
+  MallocMemorySnapshot* malloc_memory_snapshot() const {
+    return _malloc_memory_snapshot;
   }
 
-  MallocMem& operator=(const MallocMem& m) {
-    assert(_type == m.type(), "different type");
-    _count = m.count();
-    _amount = m.amount();
-    return *this;
+  VirtualMemorySnapshot* virtual_memory_snapshot() const {
+    return _virtual_memory_snapshot;
   }
 
-  inline void inc(size_t amt) {
-    _amount += amt;
-    _count ++;
-  }
+  MallocSiteIterator malloc_sites(SortingOrder order);
+  VirtualMemorySiteIterator virtual_memory_sites(SortingOrder order);
 
-  inline void reduce(size_t amt) {
-    assert(_amount >= amt, "Just check");
-    _amount -= amt;
-  }
-
-  inline void overwrite_counter(size_t count) {
-    _count = count;
+  // Virtual memory allocation iterator always returns in virtual memory
+  // base address order.
+  VirtualMemoryAllocationIterator virtual_memory_allocations() {
+    assert(!_virtual_memory_allocations.is_empty(), "Not detail baseline");
+    return VirtualMemoryAllocationIterator(_virtual_memory_allocations.head());
   }
 
-  inline MEMFLAGS type() const {
-    return _type;
-  }
-
-  inline bool is_type(MEMFLAGS flags) const {
-    return FLAGS_TO_MEMORY_TYPE(flags) == _type;
-  }
-
-  inline size_t count() const {
-    return _count;
+  // Total reserved memory = total malloc'd memory + total reserved virtual
+  // memory
+  size_t total_reserved_memory() const {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    assert(_virtual_memory_snapshot != NULL, "No virtual memory snapshot");
+    assert(_malloc_memory_snapshot != NULL,  "No malloc memory snapshot");
+    size_t amount = _malloc_memory_snapshot->total() +
+           _virtual_memory_snapshot->total_reserved();
+    return amount;
   }
 
-  inline size_t amount() const {
-    return _amount;
-  }
-};
-
-// This class records live arena's memory usage
-class ArenaMem : public MallocMem {
- public:
-  ArenaMem(MEMFLAGS typeflag): MallocMem(typeflag) {
-  }
-  ArenaMem() { }
-};
-
-// This class aggregates virtual memory by its memory type
-class VMMem VALUE_OBJ_CLASS_SPEC {
- private:
-  MEMFLAGS       _type;
-
-  size_t         _count;
-  size_t         _reserved_amount;
-  size_t         _committed_amount;
-
- public:
-  VMMem() {
-    _type = mtNone;
-    _count = 0;
-    _reserved_amount = 0;
-    _committed_amount = 0;
+  // Total committed memory = total malloc'd memory + total committed
+  // virtual memory
+  size_t total_committed_memory() const {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    assert(_virtual_memory_snapshot != NULL,
+      "Not a snapshot");
+    size_t amount = _malloc_memory_snapshot->total() +
+           _virtual_memory_snapshot->total_committed();
+    return amount;
   }
 
-  VMMem(MEMFLAGS flags) {
-    assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
-    _type = FLAGS_TO_MEMORY_TYPE(flags);
-    _count = 0;
-    _reserved_amount = 0;
-    _committed_amount = 0;
+  size_t total_arena_memory() const {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    assert(_malloc_memory_snapshot != NULL, "Not yet baselined");
+    return _malloc_memory_snapshot->total_arena();
   }
 
-  inline void clear() {
-    _type = mtNone;
-    _count = 0;
-    _reserved_amount = 0;
-    _committed_amount = 0;
+  size_t malloc_tracking_overhead() const {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    return _malloc_memory_snapshot->malloc_overhead()->size();
   }
 
-  inline void set_type(MEMFLAGS flags) {
-    _type = FLAGS_TO_MEMORY_TYPE(flags);
+  const MallocMemory* malloc_memory(MEMFLAGS flag) const {
+    assert(_malloc_memory_snapshot != NULL, "Not a snapshot");
+    return _malloc_memory_snapshot->by_type(flag);
   }
 
-  VMMem& operator=(const VMMem& m) {
-    assert(_type == m.type(), "different type");
-
-    _count = m.count();
-    _reserved_amount = m.reserved_amount();
-    _committed_amount = m.committed_amount();
-    return *this;
+  const VirtualMemory* virtual_memory(MEMFLAGS flag) const {
+    assert(_virtual_memory_snapshot != NULL, "Not a snapshot");
+    return _virtual_memory_snapshot->by_type(flag);
   }
 
 
-  inline MEMFLAGS type() const {
-    return _type;
-  }
-
-  inline bool is_type(MEMFLAGS flags) const {
-    return FLAGS_TO_MEMORY_TYPE(flags) == _type;
-  }
-
-  inline void inc(size_t reserved_amt, size_t committed_amt) {
-    _reserved_amount += reserved_amt;
-    _committed_amount += committed_amt;
-    _count ++;
-  }
-
-  inline size_t count() const {
-    return _count;
-  }
-
-  inline size_t reserved_amount() const {
-    return _reserved_amount;
+  size_t class_count() const {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    return _class_count;
   }
 
-  inline size_t committed_amount() const {
-    return _committed_amount;
+  size_t thread_count() const {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    assert(_malloc_memory_snapshot != NULL, "Baselined?");
+    return _malloc_memory_snapshot->thread_count();
   }
-};
-
-
-
-#define NUMBER_OF_MEMORY_TYPE    (mt_number_of_types + 1)
-
-class BaselineReporter;
-class BaselineComparisonReporter;
-
-/*
- * This class baselines current memory snapshot.
- * A memory baseline summarizes memory usage by memory type,
- * aggregates memory usage by callsites when detail tracking
- * is on.
- */
-class MemBaseline VALUE_OBJ_CLASS_SPEC {
-  friend class BaselineReporter;
-  friend class BaselineComparisonReporter;
-
- private:
-  // overall summaries
-  size_t        _total_malloced;
-  size_t        _total_vm_reserved;
-  size_t        _total_vm_committed;
-  size_t        _number_of_classes;
-  size_t        _number_of_threads;
-
-  // if it has properly baselined
-  bool          _baselined;
-
-  // we categorize memory into three categories within the memory type
-  MallocMem     _malloc_data[NUMBER_OF_MEMORY_TYPE];
-  VMMem         _vm_data[NUMBER_OF_MEMORY_TYPE];
-  ArenaMem      _arena_data[NUMBER_OF_MEMORY_TYPE];
-
-  // memory records that aggregate memory usage by callsites.
-  // only available when detail tracking is on.
-  MemPointerArray*  _malloc_cs;
-  MemPointerArray*  _vm_cs;
-  // virtual memory map
-  MemPointerArray*  _vm_map;
-
- private:
-  static MemType2Name  MemType2NameMap[NUMBER_OF_MEMORY_TYPE];
-
- private:
-  // should not use copy constructor
-  MemBaseline(MemBaseline& copy) { ShouldNotReachHere(); }
-
-  // check and block at a safepoint
-  static inline void check_safepoint(JavaThread* thr);
-
- public:
-  // create a memory baseline
-  MemBaseline();
-
-  ~MemBaseline();
-
-  inline bool baselined() const {
-    return _baselined;
-  }
-
-  MemBaseline& operator=(const MemBaseline& other);
 
   // reset the baseline for reuse
-  void clear();
-
-  // baseline the snapshot
-  bool baseline(MemSnapshot& snapshot, bool summary_only = true);
-
-  bool baseline(const MemPointerArray* malloc_records,
-                const MemPointerArray* vm_records,
-                bool summary_only = true);
+  void reset() {
+    _baseline_type = Not_baselined;
+    _malloc_memory_snapshot = NULL;
+    _virtual_memory_snapshot = NULL;
+    _class_count  = 0;
 
-  // total malloc'd memory of specified memory type
-  inline size_t malloc_amount(MEMFLAGS flag) const {
-    return _malloc_data[flag2index(flag)].amount();
-  }
-  // number of malloc'd memory blocks of specified memory type
-  inline size_t malloc_count(MEMFLAGS flag) const {
-    return _malloc_data[flag2index(flag)].count();
-  }
-  // total memory used by arenas of specified memory type
-  inline size_t arena_amount(MEMFLAGS flag) const {
-    return _arena_data[flag2index(flag)].amount();
-  }
-  // number of arenas of specified memory type
-  inline size_t arena_count(MEMFLAGS flag) const {
-    return _arena_data[flag2index(flag)].count();
-  }
-  // total reserved memory of specified memory type
-  inline size_t reserved_amount(MEMFLAGS flag) const {
-    return _vm_data[flag2index(flag)].reserved_amount();
-  }
-  // total committed memory of specified memory type
-  inline size_t committed_amount(MEMFLAGS flag) const {
-    return _vm_data[flag2index(flag)].committed_amount();
-  }
-  // total memory (malloc'd + mmap'd + arena) of specified
-  // memory type
-  inline size_t total_amount(MEMFLAGS flag) const {
-    int index = flag2index(flag);
-    return _malloc_data[index].amount() +
-           _vm_data[index].reserved_amount() +
-           _arena_data[index].amount();
+    _malloc_sites = NULL;
+    _virtual_memory_sites = NULL;
+    _virtual_memory_allocations = NULL;
+
+    if (_arena != NULL) {
+      _arena->destruct_contents();
+    }
   }
 
-  /* overall summaries */
+ private:
+  // Baseline summary information
+  bool baseline_summary();
 
-  // total malloc'd memory in snapshot
-  inline size_t total_malloc_amount() const {
-    return _total_malloced;
-  }
-  // total mmap'd memory in snapshot
-  inline size_t total_reserved_amount() const {
-    return _total_vm_reserved;
-  }
-  // total committed memory in snapshot
-  inline size_t total_committed_amount() const {
-    return _total_vm_committed;
-  }
-  // number of loaded classes
-  inline size_t number_of_classes() const {
-    return _number_of_classes;
-  }
-  // number of running threads
-  inline size_t number_of_threads() const {
-    return _number_of_threads;
-  }
-  // lookup human readable name of a memory type
-  static const char* type2name(MEMFLAGS type);
+  // Baseline allocation sites (detail tracking only)
+  bool baseline_allocation_sites();
+
+  // Aggregate virtual memory allocation by allocation sites
+  bool aggregate_virtual_memory_allocation_sites();
 
- private:
-  // convert memory flag to the index to mapping table
-  int         flag2index(MEMFLAGS flag) const;
-
-  // reset baseline values
-  void reset();
-
-  // summarize the records in global snapshot
-  bool baseline_malloc_summary(const MemPointerArray* malloc_records);
-  bool baseline_vm_summary(const MemPointerArray* vm_records);
-  bool baseline_malloc_details(const MemPointerArray* malloc_records);
-  bool baseline_vm_details(const MemPointerArray* vm_records);
+  Arena* arena() { return _arena; }
 
-  // print a line of malloc'd memory aggregated by callsite
-  void print_malloc_callsite(outputStream* st, address pc, size_t size,
-    size_t count, int diff_amt, int diff_count) const;
-  // print a line of mmap'd memory aggregated by callsite
-  void print_vm_callsite(outputStream* st, address pc, size_t rsz,
-    size_t csz, int diff_rsz, int diff_csz) const;
+  // Sorting allocation sites in different orders
+  // Sort allocation sites in size order
+  void malloc_sites_to_size_order();
+  // Sort allocation sites in call site address order
+  void malloc_sites_to_allocation_site_order();
 
-  // sorting functions for raw records
-  static int malloc_sort_by_pc(const void* p1, const void* p2);
-  static int malloc_sort_by_addr(const void* p1, const void* p2);
-
- private:
-  // sorting functions for baselined records
-  static int bl_malloc_sort_by_size(const void* p1, const void* p2);
-  static int bl_vm_sort_by_size(const void* p1, const void* p2);
-  static int bl_malloc_sort_by_pc(const void* p1, const void* p2);
-  static int bl_vm_sort_by_pc(const void* p1, const void* p2);
+  // Sort allocation sites in reserved size order
+  void virtual_memory_sites_to_size_order();
+  // Sort allocation sites in call site address order
+  void virtual_memory_sites_to_reservation_site_order();
 };
 
+#endif // INCLUDE_NMT
 
 #endif // SHARE_VM_SERVICES_MEM_BASELINE_HPP
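
Taken together, the new header collapses the baseline workflow to
construct / baseline / iterate. A hedged sketch of the intended call
sequence (iterator behavior assumed from utilities/linkedlist.hpp; error
handling and reporting elided):

    // Sketch only; not code from this changeset.
    MemBaseline baseline;
    if (baseline.baseline(false /* summaryOnly */)) {  // detail baseline
      size_t reserved  = baseline.total_reserved_memory();
      size_t committed = baseline.total_committed_memory();

      // Walk malloc call sites, largest first.
      MallocSiteIterator itr = baseline.malloc_sites(MemBaseline::by_size);
      const MallocSite* site;
      while ((site = itr.next()) != NULL) {
        // report site->size(), site->count(), *site->call_stack() ...
      }
    }
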
--- a/hotspot/src/share/vm/services/memPtr.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/atomic.inline.hpp"
-#include "services/memPtr.hpp"
-#include "services/memTracker.hpp"
-
-volatile jint SequenceGenerator::_seq_number = 1;
-volatile unsigned long SequenceGenerator::_generation = 1;
-NOT_PRODUCT(jint SequenceGenerator::_max_seq_number = 1;)
-
-jint SequenceGenerator::next() {
-  jint seq = Atomic::add(1, &_seq_number);
-  if (seq < 0) {
-    MemTracker::shutdown(MemTracker::NMT_sequence_overflow);
-  } else {
-    NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
-  }
-  return seq;
-}
-
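
The deleted SequenceGenerator handed out globally ordered sequence numbers
and treated overflow of the jint counter as fatal for tracking. A standalone
sketch of that pattern using std::atomic (illustrative; not the HotSpot
Atomic API):

    #include <atomic>
    #include <cstdint>

    static std::atomic<int32_t> g_seq(1);

    // Returns the next sequence number. Like Atomic::add(1, &_seq_number)
    // above, the incremented value is returned; a negative result means the
    // counter wrapped, which the old code answered by shutting NMT down.
    int32_t next_seq(bool* overflowed) {
      int32_t seq = g_seq.fetch_add(1, std::memory_order_relaxed) + 1;
      *overflowed = (seq < 0);
      return seq;
    }
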
--- a/hotspot/src/share/vm/services/memPtr.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,509 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SERVICES_MEM_PTR_HPP
-#define SHARE_VM_SERVICES_MEM_PTR_HPP
-
-#include "memory/allocation.hpp"
-#include "runtime/os.hpp"
-#include "runtime/safepoint.hpp"
-
-/*
- * global sequence generator that generates sequence numbers to serialize
- * memory records.
- */
-class SequenceGenerator : AllStatic {
- public:
-  static jint next();
-
-  // peek last sequence number
-  static jint peek() {
-    return _seq_number;
-  }
-
-  // reset sequence number
-  static void reset() {
-    assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
-    _seq_number = 1;
-    _generation ++;
-  };
-
-  static unsigned long current_generation() { return _generation; }
-  NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; })
-
- private:
-  static volatile jint             _seq_number;
-  static volatile unsigned long    _generation;
-  NOT_PRODUCT(static jint          _max_seq_number; )
-};
-
-/*
- * followings are the classes that are used to hold memory activity records in different stages.
- *   MemPointer
- *     |--------MemPointerRecord
- *                     |
- *                     |----MemPointerRecordEx
- *                     |           |
- *                     |           |-------SeqMemPointerRecordEx
- *                     |
- *                     |----SeqMemPointerRecord
- *                     |
- *                     |----VMMemRegion
- *                               |
- *                               |-----VMMemRegionEx
- *
- *
- *  prefix 'Seq' - sequenced, the record contains a sequence number
- *  surfix 'Ex'  - extension, the record contains a caller's pc
- *
- *  per-thread recorder : SeqMemPointerRecord(Ex)
- *  snapshot staging    : SeqMemPointerRecord(Ex)
- *  snapshot            : MemPointerRecord(Ex) and VMMemRegion(Ex)
- *
- */
-
-/*
- * class that wraps an address to a memory block,
- * the memory pointer either points to a malloc'd
- * memory block, or a mmap'd memory block
- */
-class MemPointer VALUE_OBJ_CLASS_SPEC {
- public:
-  MemPointer(): _addr(0) { }
-  MemPointer(address addr): _addr(addr) { }
-
-  MemPointer(const MemPointer& copy_from) {
-    _addr = copy_from.addr();
-  }
-
-  inline address addr() const {
-    return _addr;
-  }
-
-  inline operator address() const {
-    return addr();
-  }
-
-  inline bool operator == (const MemPointer& other) const {
-    return addr() == other.addr();
-  }
-
-  inline MemPointer& operator = (const MemPointer& other) {
-    _addr = other.addr();
-    return *this;
-  }
-
- protected:
-  inline void set_addr(address addr) { _addr = addr; }
-
- protected:
-  // memory address
-  address    _addr;
-};
-
-/* MemPointerRecord records an activityand associated
- * attributes on a memory block.
- */
-class MemPointerRecord : public MemPointer {
- private:
-  MEMFLAGS       _flags;
-  size_t         _size;
-
-public:
-  /* extension of MemoryType enum
-   * see share/vm/memory/allocation.hpp for details.
-   *
-   * The tag values are associated to sorting orders, so be
-   * careful if changes are needed.
-   * The allocation records should be sorted ahead of tagging
-   * records, which in turn ahead of deallocation records
-   */
-  enum MemPointerTags {
-    tag_alloc            = 0x0001, // malloc or reserve record
-    tag_commit           = 0x0002, // commit record
-    tag_type             = 0x0003, // tag virtual memory to a memory type
-    tag_uncommit         = 0x0004, // uncommit record
-    tag_release          = 0x0005, // free or release record
-    tag_size             = 0x0006, // arena size
-    tag_masks            = 0x0007, // all tag bits
-    vmBit                = 0x0008
-  };
-
-  /* helper functions to interpret the tagging flags */
-
-  inline static bool is_allocation_record(MEMFLAGS flags) {
-    return (flags & tag_masks) == tag_alloc;
-  }
-
-  inline static bool is_deallocation_record(MEMFLAGS flags) {
-    return (flags & tag_masks) == tag_release;
-  }
-
-  inline static bool is_arena_record(MEMFLAGS flags) {
-    return (flags & (otArena | tag_size)) == otArena;
-  }
-
-  inline static bool is_arena_memory_record(MEMFLAGS flags) {
-    return (flags & (otArena | tag_size)) == (otArena | tag_size);
-  }
-
-  inline static bool is_virtual_memory_record(MEMFLAGS flags) {
-    return (flags & vmBit) != 0;
-  }
-
-  inline static bool is_virtual_memory_reserve_record(MEMFLAGS flags) {
-    return (flags & 0x0F) == (tag_alloc | vmBit);
-  }
-
-  inline static bool is_virtual_memory_commit_record(MEMFLAGS flags) {
-    return (flags & 0x0F) == (tag_commit | vmBit);
-  }
-
-  inline static bool is_virtual_memory_uncommit_record(MEMFLAGS flags) {
-    return (flags & 0x0F) == (tag_uncommit | vmBit);
-  }
-
-  inline static bool is_virtual_memory_release_record(MEMFLAGS flags) {
-    return (flags & 0x0F) == (tag_release | vmBit);
-  }
-
-  inline static bool is_virtual_memory_type_record(MEMFLAGS flags) {
-    return (flags & 0x0F) == (tag_type | vmBit);
-  }
-
-  /* tagging flags */
-  inline static MEMFLAGS malloc_tag()                 { return tag_alloc;   }
-  inline static MEMFLAGS free_tag()                   { return tag_release; }
-  inline static MEMFLAGS arena_size_tag()             { return tag_size | otArena; }
-  inline static MEMFLAGS virtual_memory_tag()         { return vmBit; }
-  inline static MEMFLAGS virtual_memory_reserve_tag() { return (tag_alloc | vmBit); }
-  inline static MEMFLAGS virtual_memory_commit_tag()  { return (tag_commit | vmBit); }
-  inline static MEMFLAGS virtual_memory_uncommit_tag(){ return (tag_uncommit | vmBit); }
-  inline static MEMFLAGS virtual_memory_release_tag() { return (tag_release | vmBit); }
-  inline static MEMFLAGS virtual_memory_type_tag()    { return (tag_type | vmBit); }
-
- public:
-  MemPointerRecord(): _size(0), _flags(mtNone) { }
-
-  MemPointerRecord(address addr, MEMFLAGS memflags, size_t size = 0):
-      MemPointer(addr), _flags(memflags), _size(size) { }
-
-  MemPointerRecord(const MemPointerRecord& copy_from):
-    MemPointer(copy_from), _flags(copy_from.flags()),
-    _size(copy_from.size()) {
-  }
-
-  /* MemPointerRecord is not sequenced, it always return
-   * 0 to indicate non-sequenced
-   */
-  virtual jint seq() const               { return 0; }
-
-  inline size_t   size()  const          { return _size; }
-  inline void set_size(size_t size)      { _size = size; }
-
-  inline MEMFLAGS flags() const          { return _flags; }
-  inline void set_flags(MEMFLAGS flags)  { _flags = flags; }
-
-  MemPointerRecord& operator= (const MemPointerRecord& ptr) {
-    MemPointer::operator=(ptr);
-    _flags = ptr.flags();
-#ifdef ASSERT
-    if (IS_ARENA_OBJ(_flags)) {
-      assert(!is_vm_pointer(), "wrong flags");
-      assert((_flags & ot_masks) == otArena, "wrong flags");
-    }
-#endif
-    _size = ptr.size();
-    return *this;
-  }
-
-  // if the pointer represents a malloc-ed memory address
-  inline bool is_malloced_pointer() const {
-    return !is_vm_pointer();
-  }
-
-  // if the pointer represents a virtual memory address
-  inline bool is_vm_pointer() const {
-    return is_virtual_memory_record(_flags);
-  }
-
-  // if this record records a 'malloc' or virtual memory
-  // 'reserve' call
-  inline bool is_allocation_record() const {
-    return is_allocation_record(_flags);
-  }
-
-  // if this record records a size information of an arena
-  inline bool is_arena_memory_record() const {
-    return is_arena_memory_record(_flags);
-  }
-
-  // if this pointer represents an address to an arena object
-  inline bool is_arena_record() const {
-    return is_arena_record(_flags);
-  }
-
-  // if this record represents a size information of specific arena
-  inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) {
-    assert(is_arena_memory_record(), "not size record");
-    assert(arena_rc->is_arena_record(), "not arena record");
-    return (arena_rc->addr() + sizeof(void*)) == addr();
-  }
-
-  // if this record records a 'free' or virtual memory 'free' call
-  inline bool is_deallocation_record() const {
-    return is_deallocation_record(_flags);
-  }
-
-  // if this record records a virtual memory 'commit' call
-  inline bool is_commit_record() const {
-    return is_virtual_memory_commit_record(_flags);
-  }
-
-  // if this record records a virtual memory 'uncommit' call
-  inline bool is_uncommit_record() const {
-    return is_virtual_memory_uncommit_record(_flags);
-  }
-
-  // if this record is a tagging record of a virtual memory block
-  inline bool is_type_tagging_record() const {
-    return is_virtual_memory_type_record(_flags);
-  }
-
-  // if the two memory pointer records actually represent the same
-  // memory block
-  inline bool is_same_region(const MemPointerRecord* other) const {
-    return (addr() == other->addr() && size() == other->size());
-  }
-
-  // if this memory region fully contains another one
-  inline bool contains_region(const MemPointerRecord* other) const {
-    return contains_region(other->addr(), other->size());
-  }
-
-  // if this memory region fully contains specified memory range
-  inline bool contains_region(address add, size_t sz) const {
-    return (addr() <= add && addr() + size() >= add + sz);
-  }
-
-  inline bool contains_address(address add) const {
-    return (addr() <= add && addr() + size() > add);
-  }
-
-  // if this memory region overlaps another region
-  inline bool overlaps_region(const MemPointerRecord* other) const {
-    assert(other != NULL, "Just check");
-    assert(size() > 0 && other->size() > 0, "empty range");
-    return contains_address(other->addr()) ||
-           contains_address(other->addr() + other->size() - 1) || // exclude end address
-           other->contains_address(addr()) ||
-           other->contains_address(addr() + size() - 1); // exclude end address
-  }
-
-};
-
-// MemPointerRecordEx also records callsite pc, from where
-// the memory block is allocated
-class MemPointerRecordEx : public MemPointerRecord {
- private:
-  address      _pc;  // callsite pc
-
- public:
-  MemPointerRecordEx(): _pc(0) { }
-
-  MemPointerRecordEx(address addr, MEMFLAGS memflags, size_t size = 0, address pc = 0):
-    MemPointerRecord(addr, memflags, size), _pc(pc) {}
-
-  MemPointerRecordEx(const MemPointerRecordEx& copy_from):
-    MemPointerRecord(copy_from), _pc(copy_from.pc()) {}
-
-  inline address pc() const { return _pc; }
-
-  void init(const MemPointerRecordEx* mpe) {
-    MemPointerRecord::operator=(*mpe);
-    _pc = mpe->pc();
-  }
-
-  void init(const MemPointerRecord* mp) {
-    MemPointerRecord::operator=(*mp);
-    _pc = 0;
-  }
-};
-
-// a virtual memory region. The region can represent a reserved
-// virtual memory region or a committed memory region
-class VMMemRegion : public MemPointerRecord {
-public:
-  VMMemRegion() { }
-
-  void init(const MemPointerRecord* mp) {
-    assert(mp->is_vm_pointer(), "Sanity check");
-    _addr = mp->addr();
-      set_size(mp->size());
-    set_flags(mp->flags());
-  }
-
-  VMMemRegion& operator=(const VMMemRegion& other) {
-    MemPointerRecord::operator=(other);
-    return *this;
-  }
-
-  inline bool is_reserved_region() const {
-    return is_allocation_record();
-  }
-
-  inline bool is_committed_region() const {
-    return is_commit_record();
-  }
-
-  /* base address of this virtual memory range */
-  inline address base() const {
-    return addr();
-  }
-
-  /* tag this virtual memory range to the specified memory type */
-  inline void tag(MEMFLAGS f) {
-    set_flags(flags() | (f & mt_masks));
-  }
-
-  // expand this region to also cover specified range.
-  // The range has to be on either end of the memory region.
-  void expand_region(address addr, size_t sz) {
-    if (addr < base()) {
-      assert(addr + sz == base(), "Sanity check");
-      _addr = addr;
-      set_size(size() + sz);
-    } else {
-      assert(base() + size() == addr, "Sanity check");
-      set_size(size() + sz);
-    }
-  }
-
-  // exclude the specified address range from this region.
-  // The excluded memory range has to be on either end of this memory
-  // region.
-  inline void exclude_region(address add, size_t sz) {
-    assert(is_reserved_region() || is_committed_region(), "Sanity check");
-    assert(addr() != NULL && size() != 0, "Sanity check");
-    assert(add >= addr() && add < addr() + size(), "Sanity check");
-    assert(add == addr() || (add + sz) == (addr() + size()),
-      "exclude in the middle");
-    if (add == addr()) {
-      set_addr(add + sz);
-      set_size(size() - sz);
-    } else {
-      set_size(size() - sz);
-    }
-  }
-};
-
-class VMMemRegionEx : public VMMemRegion {
- private:
-  jint   _seq;  // sequence number
-
- public:
-  VMMemRegionEx(): _pc(0) { }
-
-  void init(const MemPointerRecordEx* mpe) {
-    VMMemRegion::init(mpe);
-    _pc = mpe->pc();
-  }
-
-  void init(const MemPointerRecord* mpe) {
-    VMMemRegion::init(mpe);
-    _pc = 0;
-  }
-
-  VMMemRegionEx& operator=(const VMMemRegionEx& other) {
-    VMMemRegion::operator=(other);
-    _pc = other.pc();
-    return *this;
-  }
-
-  inline address pc() const { return _pc; }
- private:
-  address   _pc;
-};
-
-/*
- * Sequenced memory record
- */
-class SeqMemPointerRecord : public MemPointerRecord {
- private:
-   jint _seq;  // sequence number
-
- public:
-  SeqMemPointerRecord(): _seq(0){ }
-
-  SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq)
-    : MemPointerRecord(addr, flags, size), _seq(seq)  {
-  }
-
-  SeqMemPointerRecord(const SeqMemPointerRecord& copy_from)
-    : MemPointerRecord(copy_from) {
-    _seq = copy_from.seq();
-  }
-
-  SeqMemPointerRecord& operator= (const SeqMemPointerRecord& ptr) {
-    MemPointerRecord::operator=(ptr);
-    _seq = ptr.seq();
-    return *this;
-  }
-
-  inline jint seq() const {
-    return _seq;
-  }
-};
-
-
-
-class SeqMemPointerRecordEx : public MemPointerRecordEx {
- private:
-  jint    _seq;  // sequence number
-
- public:
-  SeqMemPointerRecordEx(): _seq(0) { }
-
-  SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size,
-    jint seq, address pc):
-    MemPointerRecordEx(addr, flags, size, pc), _seq(seq)  {
-  }
-
-  SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
-    : MemPointerRecordEx(copy_from) {
-    _seq = copy_from.seq();
-  }
-
-  SeqMemPointerRecordEx& operator= (const SeqMemPointerRecordEx& ptr) {
-    MemPointerRecordEx::operator=(ptr);
-    _seq = ptr.seq();
-    return *this;
-  }
-
-  inline jint seq() const {
-    return _seq;
-  }
-};
-
-#endif // SHARE_VM_SERVICES_MEM_PTR_HPP
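
Most of the logic deleted with memPtr.hpp lives in the tag bits: the low
three bits select the record kind and vmBit marks virtual memory records.
A compact sketch of that encoding with plain enums instead of MEMFLAGS
(values copied from the deleted MemPointerTags):

    enum Tag {
      tag_alloc    = 0x0001,  // malloc or reserve record
      tag_commit   = 0x0002,
      tag_type     = 0x0003,
      tag_uncommit = 0x0004,
      tag_release  = 0x0005,
      tag_masks    = 0x0007,  // all tag bits
      vmBit        = 0x0008
    };

    // Mirrors is_virtual_memory_reserve_record(): both the kind bits and
    // vmBit must match.
    inline bool is_vm_reserve(int flags) {
      return (flags & 0x0F) == (tag_alloc | vmBit);
    }

    // A malloc record carries tag_alloc without vmBit.
    inline bool is_malloc(int flags) {
      return (flags & (tag_masks | vmBit)) == tag_alloc;
    }
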
--- a/hotspot/src/share/vm/services/memPtrArray.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,306 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#ifndef SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
-#define SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
-
-#include "memory/allocation.hpp"
-#include "services/memPtr.hpp"
-
-class MemPtr;
-class MemRecorder;
-class ArenaInfo;
-class MemSnapshot;
-
-extern "C" {
-  typedef int (*FN_SORT)(const void *, const void *);
-}
-
-
-// Memory pointer array interface. This array is used by NMT to hold
-// various memory block information.
-// The memory pointer arrays are usually walked with their iterators.
-
-class MemPointerArray : public CHeapObj<mtNMT> {
- public:
-  virtual ~MemPointerArray() { }
-
-  // return true if it can not allocate storage for the data
-  virtual bool out_of_memory() const = 0;
-  virtual bool is_empty() const = 0;
-  virtual bool is_full() = 0;
-  virtual int  length() const = 0;
-  virtual void clear() = 0;
-  virtual bool append(MemPointer* ptr) = 0;
-  virtual bool insert_at(MemPointer* ptr, int pos) = 0;
-  virtual bool remove_at(int pos) = 0;
-  virtual MemPointer* at(int index) const = 0;
-  virtual void sort(FN_SORT fn) = 0;
-  virtual size_t instance_size() const = 0;
-  virtual bool shrink() = 0;
-
-  NOT_PRODUCT(virtual int capacity() const = 0;)
-};
-
-// Iterator interface
-class MemPointerArrayIterator VALUE_OBJ_CLASS_SPEC {
- public:
-  // return the pointer at current position
-  virtual MemPointer* current() const = 0;
-  // return the next pointer and advance current position
-  virtual MemPointer* next() = 0;
-  // return next pointer without advancing current position
-  virtual MemPointer* peek_next() const = 0;
-  // return previous pointer without changing current position
-  virtual MemPointer* peek_prev() const = 0;
-  // remove the pointer at current position
-  virtual void        remove() = 0;
-  // insert the pointer at current position
-  virtual bool        insert(MemPointer* ptr) = 0;
-  // insert specified element after current position and
-  // move current position to newly inserted position
-  virtual bool        insert_after(MemPointer* ptr) = 0;
-};
-
-// implementation class
-class MemPointerArrayIteratorImpl : public MemPointerArrayIterator {
- protected:
-  MemPointerArray*  _array;
-  int               _pos;
-
- public:
-  MemPointerArrayIteratorImpl(MemPointerArray* arr) {
-    assert(arr != NULL, "Parameter check");
-    _array = arr;
-    _pos = 0;
-  }
-
-  virtual MemPointer* current() const {
-    if (_pos < _array->length()) {
-      return _array->at(_pos);
-    }
-    return NULL;
-  }
-
-  virtual MemPointer* next() {
-    if (_pos + 1 < _array->length()) {
-      return _array->at(++_pos);
-    }
-    _pos = _array->length();
-    return NULL;
-  }
-
-  virtual MemPointer* peek_next() const {
-    if (_pos + 1 < _array->length()) {
-      return _array->at(_pos + 1);
-    }
-    return NULL;
-  }
-
-  virtual MemPointer* peek_prev() const {
-    if (_pos > 0) {
-      return _array->at(_pos - 1);
-    }
-    return NULL;
-  }
-
-  virtual void remove() {
-    if (_pos < _array->length()) {
-      _array->remove_at(_pos);
-    }
-  }
-
-  virtual bool insert(MemPointer* ptr) {
-    return _array->insert_at(ptr, _pos);
-  }
-
-  virtual bool insert_after(MemPointer* ptr) {
-    if (_array->insert_at(ptr, _pos + 1)) {
-      _pos ++;
-      return true;
-    }
-    return false;
-  }
-};
-
-
-
-// Memory pointer array implementation.
-// This implementation implements expandable array
-#define DEFAULT_PTR_ARRAY_SIZE 1024
-
-template <class E> class MemPointerArrayImpl : public MemPointerArray {
- private:
-  int                   _max_size;
-  int                   _size;
-  bool                  _init_elements;
-  E*                    _data;
-
- public:
-  MemPointerArrayImpl(int initial_size = DEFAULT_PTR_ARRAY_SIZE, bool init_elements = true):
-   _max_size(initial_size), _size(0), _init_elements(init_elements) {
-    _data = (E*)raw_allocate(sizeof(E), initial_size);
-    if (_init_elements) {
-      for (int index = 0; index < _max_size; index ++) {
-        ::new ((void*)&_data[index]) E();
-      }
-    }
-  }
-
-  virtual ~MemPointerArrayImpl() {
-    if (_data != NULL) {
-      raw_free(_data);
-    }
-  }
-
- public:
-  bool out_of_memory() const {
-    return (_data == NULL);
-  }
-
-  size_t instance_size() const {
-    return sizeof(MemPointerArrayImpl<E>) + _max_size * sizeof(E);
-  }
-
-  bool is_empty() const {
-    assert(_data != NULL, "Just check");
-    return _size == 0;
-  }
-
-  bool is_full() {
-    assert(_data != NULL, "Just check");
-    if (_size < _max_size) {
-      return false;
-    } else {
-      return !expand_array();
-    }
-  }
-
-  int length() const {
-    assert(_data != NULL, "Just check");
-    return _size;
-  }
-
-  NOT_PRODUCT(int capacity() const { return _max_size; })
-
-  void clear() {
-    assert(_data != NULL, "Just check");
-    _size = 0;
-  }
-
-  bool append(MemPointer* ptr) {
-    assert(_data != NULL, "Just check");
-    if (is_full()) {
-      return false;
-    }
-    _data[_size ++] = *(E*)ptr;
-    return true;
-  }
-
-  bool insert_at(MemPointer* ptr, int pos) {
-    assert(_data != NULL, "Just check");
-    if (is_full()) {
-      return false;
-    }
-    for (int index = _size; index > pos; index --) {
-      _data[index] = _data[index - 1];
-    }
-    _data[pos] = *(E*)ptr;
-    _size ++;
-    return true;
-  }
-
-  bool remove_at(int pos) {
-    assert(_data != NULL, "Just check");
-    if (_size <= pos && pos >= 0) {
-      return false;
-    }
-    -- _size;
-
-    for (int index = pos; index < _size; index ++) {
-      _data[index] = _data[index + 1];
-    }
-    return true;
-  }
-
-  MemPointer* at(int index) const {
-    assert(_data != NULL, "Just check");
-    assert(index >= 0 && index < _size, "illegal index");
-    return &_data[index];
-  }
-
-  bool shrink() {
-    float used = ((float)_size) / ((float)_max_size);
-    if (used < 0.40) {
-      E* old_ptr = _data;
-      int new_size = ((_max_size) / (2 * DEFAULT_PTR_ARRAY_SIZE) + 1) * DEFAULT_PTR_ARRAY_SIZE;
-      _data = (E*)raw_reallocate(_data, sizeof(E), new_size);
-      if (_data == NULL) {
-        _data = old_ptr;
-        return false;
-      } else {
-        _max_size = new_size;
-        return true;
-      }
-    }
-    return false;
-  }
-
-  void sort(FN_SORT fn) {
-    assert(_data != NULL, "Just check");
-    qsort((void*)_data, _size, sizeof(E), fn);
-  }
-
- private:
-  bool  expand_array() {
-    assert(_data != NULL, "Not yet allocated");
-    E* old_ptr = _data;
-    if ((_data = (E*)raw_reallocate((void*)_data, sizeof(E),
-      _max_size + DEFAULT_PTR_ARRAY_SIZE)) == NULL) {
-      _data = old_ptr;
-      return false;
-    } else {
-      _max_size += DEFAULT_PTR_ARRAY_SIZE;
-      if (_init_elements) {
-        for (int index = _size; index < _max_size; index ++) {
-          ::new ((void*)&_data[index]) E();
-        }
-      }
-      return true;
-    }
-  }
-
-  void* raw_allocate(size_t elementSize, int items) {
-    return os::malloc(elementSize * items, mtNMT);
-  }
-
-  void* raw_reallocate(void* ptr, size_t elementSize, int items) {
-    return os::realloc(ptr, elementSize * items, mtNMT);
-  }
-
-  void  raw_free(void* ptr) {
-    os::free(ptr, mtNMT);
-  }
-};
-
-#endif // SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
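
The deleted MemPointerArrayImpl grew in DEFAULT_PTR_ARRAY_SIZE chunks and
shrank once utilization fell below 40%. A minimal sketch of that resize
policy, with std::realloc standing in for os::realloc and the chunk size
and threshold copied from the deleted code:

    #include <cstdlib>

    static const int kChunk = 1024;   // DEFAULT_PTR_ARRAY_SIZE

    struct IntArray {
      int* data = nullptr;
      int  size = 0;
      int  cap  = 0;

      // Expand by one chunk on demand, as expand_array() did.
      bool append(int v) {
        if (size == cap) {
          int* p = (int*)std::realloc(data, (cap + kChunk) * sizeof(int));
          if (p == nullptr) return false; // keep the old buffer on failure
          data = p;
          cap += kChunk;
        }
        data[size++] = v;
        return true;
      }

      // shrink() rounded the capacity down toward half, in whole chunks,
      // but only when less than 40% of the slots were in use.
      bool shrink() {
        if (cap == 0 || (float)size / (float)cap >= 0.40f) return false;
        int new_cap = (cap / (2 * kChunk) + 1) * kChunk;
        int* p = (int*)std::realloc(data, new_cap * sizeof(int));
        if (p == nullptr) return false;
        data = p;
        cap = new_cap;
        return true;
      }
    };
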
--- a/hotspot/src/share/vm/services/memRecorder.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,171 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-
-#include "runtime/atomic.inline.hpp"
-#include "services/memBaseline.hpp"
-#include "services/memRecorder.hpp"
-#include "services/memPtr.hpp"
-#include "services/memTracker.hpp"
-
-MemPointer* SequencedRecordIterator::next_record() {
-  MemPointerRecord* itr_cur = (MemPointerRecord*)_itr.current();
-  if (itr_cur == NULL)  {
-    return itr_cur;
-  }
-
-  MemPointerRecord* itr_next = (MemPointerRecord*)_itr.next();
-
-  // don't collapse virtual memory records
-  while (itr_next != NULL && !itr_cur->is_vm_pointer() &&
-    !itr_next->is_vm_pointer() &&
-    same_kind(itr_cur, itr_next)) {
-    itr_cur = itr_next;
-    itr_next = (MemPointerRecord*)_itr.next();
-  }
-
-  return itr_cur;
-}
-
-
-volatile jint MemRecorder::_instance_count = 0;
-
-MemRecorder::MemRecorder() {
-  assert(MemTracker::is_on(), "Native memory tracking is off");
-  Atomic::inc(&_instance_count);
-  set_generation();
-
-  if (MemTracker::track_callsite()) {
-    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecordEx,
-        DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
-  } else {
-    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecord,
-        DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
-  }
-  _next = NULL;
-
-
-  if (_pointer_records != NULL) {
-    // recode itself
-    address pc = CURRENT_PC;
-    record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
-        sizeof(MemRecorder), SequenceGenerator::next(), pc);
-    record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
-        _pointer_records->instance_size(), SequenceGenerator::next(), pc);
-  }
-}
-
-MemRecorder::~MemRecorder() {
-  if (_pointer_records != NULL) {
-    if (MemTracker::is_on()) {
-      MemTracker::record_free((address)_pointer_records, mtNMT);
-      MemTracker::record_free((address)this, mtNMT);
-    }
-    delete _pointer_records;
-  }
-  // delete all linked recorders
-  while (_next != NULL) {
-    MemRecorder* tmp = _next;
-    _next = _next->next();
-    tmp->set_next(NULL);
-    delete tmp;
-  }
-  Atomic::dec(&_instance_count);
-}
-
-// Sorting order:
-//   1. memory block address
-//   2. mem pointer record tags
-//   3. sequence number
-int MemRecorder::sort_record_fn(const void* e1, const void* e2) {
-  const MemPointerRecord* p1 = (const MemPointerRecord*)e1;
-  const MemPointerRecord* p2 = (const MemPointerRecord*)e2;
-  int delta = UNSIGNED_COMPARE(p1->addr(), p2->addr());
-  if (delta == 0) {
-    int df = UNSIGNED_COMPARE((p1->flags() & MemPointerRecord::tag_masks),
-                              (p2->flags() & MemPointerRecord::tag_masks));
-    if (df == 0) {
-      assert(p1->seq() != p2->seq(), "dup seq");
-      return p1->seq() - p2->seq();
-    } else {
-      return df;
-    }
-  } else {
-    return delta;
-  }
-}
-
-bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, jint seq, address pc) {
-  assert(seq > 0, "No sequence number");
-#ifdef ASSERT
-  if (MemPointerRecord::is_virtual_memory_record(flags)) {
-    assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record");
-  } else {
-    assert((flags & MemPointerRecord::tag_masks) == MemPointerRecord::malloc_tag() ||
-           (flags & MemPointerRecord::tag_masks) == MemPointerRecord::free_tag() ||
-           IS_ARENA_OBJ(flags),
-           "bad malloc record");
-  }
-  // a recorder should only hold records within the same generation
-  unsigned long cur_generation = SequenceGenerator::current_generation();
-  assert(cur_generation == _generation,
-         "this thread did not enter sync point");
-#endif
-
-  if (MemTracker::track_callsite()) {
-    SeqMemPointerRecordEx ap(p, flags, size, seq, pc);
-    debug_only(check_dup_seq(ap.seq());)
-    return _pointer_records->append(&ap);
-  } else {
-    SeqMemPointerRecord ap(p, flags, size, seq);
-    debug_only(check_dup_seq(ap.seq());)
-    return _pointer_records->append(&ap);
-  }
-}
-
-// iterator for alloc pointers
-SequencedRecordIterator MemRecorder::pointer_itr() {
-  assert(_pointer_records != NULL, "just check");
-  _pointer_records->sort((FN_SORT)sort_record_fn);
-  return SequencedRecordIterator(_pointer_records);
-}
-
-
-void MemRecorder::set_generation() {
-  _generation = SequenceGenerator::current_generation();
-}
-
-#ifdef ASSERT
-
-void MemRecorder::check_dup_seq(jint seq) const {
-  MemPointerArrayIteratorImpl itr(_pointer_records);
-  MemPointerRecord* rc = (MemPointerRecord*)itr.current();
-  while (rc != NULL) {
-    assert(rc->seq() != seq, "dup seq");
-    rc = (MemPointerRecord*)itr.next();
-  }
-}
-
-#endif
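
The recorder removed above sorts its records with a three-key comparator (block
address, then tag bits, then sequence number) so that a single forward pass can
collapse duplicate records and keep only the one with the highest sequence
number. Below is a minimal standalone sketch of that collapse; Rec is a
hypothetical stand-in for MemPointerRecord, not a HotSpot type.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Rec {
  unsigned long addr;  // memory block address
  int tag;             // allocation tag bits (malloc/free/...)
  int seq;             // global sequence number
};

int main() {
  std::vector<Rec> recs = {
    {0x1000, 1, 3}, {0x1000, 1, 7}, {0x2000, 2, 5}
  };
  // Sort by address, then tag, then sequence -- the same order that
  // sort_record_fn establishes before SequencedRecordIterator walks the array.
  std::sort(recs.begin(), recs.end(), [](const Rec& a, const Rec& b) {
    if (a.addr != b.addr) return a.addr < b.addr;
    if (a.tag  != b.tag)  return a.tag  < b.tag;
    return a.seq < b.seq;
  });
  // Collapse runs with the same (addr, tag); the last record of a run carries
  // the highest sequence number, so it is the one that survives.
  for (size_t i = 0; i < recs.size(); i++) {
    if (i + 1 < recs.size() &&
        recs[i].addr == recs[i + 1].addr && recs[i].tag == recs[i + 1].tag) {
      continue;  // superseded by a later record of the same kind
    }
    std::printf("addr=0x%lx tag=%d seq=%d\n", recs[i].addr, recs[i].tag, recs[i].seq);
  }
  return 0;
}

Note that the original iterator never collapses virtual memory records; the
sketch covers only the malloc-record path.
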
--- a/hotspot/src/share/vm/services/memRecorder.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,271 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SERVICES_MEM_RECORDER_HPP
-#define SHARE_VM_SERVICES_MEM_RECORDER_HPP
-
-#include "memory/allocation.hpp"
-#include "runtime/os.hpp"
-#include "services/memPtrArray.hpp"
-
-class MemSnapshot;
-class MemTracker;
-class MemTrackWorker;
-
-// Fixed size memory pointer array implementation
-template <class E, int SIZE> class FixedSizeMemPointerArray :
-  public MemPointerArray {
-  // This implementation is for memory recorder only
-  friend class MemRecorder;
-
- private:
-  E      _data[SIZE];
-  int    _size;
-
- protected:
-  FixedSizeMemPointerArray(bool init_elements = false):
-    _size(0) {
-    if (init_elements) {
-      for (int index = 0; index < SIZE; index ++) {
-        ::new ((void*)&_data[index]) E();
-      }
-    }
-  }
-
-  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
-    // the instance is part of MemRecorder and needs to be tagged with 'otNMTRecorder'
-    // to avoid recursion
-    return os::malloc(size, (mtNMT | otNMTRecorder));
-  }
-
-  void* operator new(size_t size) throw() {
-    assert(false, "use nothrow version");
-    return NULL;
-  }
-
-  void operator delete(void* p) {
-    os::free(p, (mtNMT | otNMTRecorder));
-  }
-
-  // instance size
-  inline size_t instance_size() const {
-    return sizeof(FixedSizeMemPointerArray<E, SIZE>);
-  }
-
-  NOT_PRODUCT(int capacity() const { return SIZE; })
-
- public:
-  // implementation of public interface
-  bool out_of_memory() const { return false; }
-  bool is_empty()      const { return _size == 0; }
-  bool is_full()             { return length() >= SIZE; }
-  int  length()        const { return _size; }
-
-  void clear() {
-    _size = 0;
-  }
-
-  bool append(MemPointer* ptr) {
-    if (is_full()) return false;
-    _data[_size ++] = *(E*)ptr;
-    return true;
-  }
-
-  virtual bool insert_at(MemPointer* p, int pos) {
-    assert(false, "append only");
-    return false;
-  }
-
-  virtual bool remove_at(int pos) {
-    assert(false, "not supported");
-    return false;
-  }
-
-  MemPointer* at(int index) const {
-    assert(index >= 0 && index < length(),
-      "parameter check");
-    return ((E*)&_data[index]);
-  }
-
-  void sort(FN_SORT fn) {
-    qsort((void*)_data, _size, sizeof(E), fn);
-  }
-
-  bool shrink() {
-    return false;
-  }
-};
-
-
-// This iterator requires pre-sorted MemPointerArray, which is sorted by:
-//  1. address
-//  2. allocation type
-//  3. sequence number
-// During the array walking, iterator collapses pointers with the same
-// address and allocation type, and only returns the one with highest
-// sequence number.
-//
-// This is a read-only iterator; update methods are asserted.
-class SequencedRecordIterator : public MemPointerArrayIterator {
- private:
-   MemPointerArrayIteratorImpl _itr;
-   MemPointer*                 _cur;
-
- public:
-  SequencedRecordIterator(const MemPointerArray* arr):
-    _itr(const_cast<MemPointerArray*>(arr)) {
-    _cur = next_record();
-  }
-
-  SequencedRecordIterator(const SequencedRecordIterator& itr):
-    _itr(itr._itr) {
-    _cur = next_record();
-  }
-
-  // return the pointer at current position
-  virtual MemPointer* current() const {
-    return _cur;
-  };
-
-  // return the next pointer and advance current position
-  virtual MemPointer* next() {
-    _cur = next_record();
-    return _cur;
-  }
-
-  // return the next pointer without advancing current position
-  virtual MemPointer* peek_next() const {
-    assert(false, "not implemented");
-    return NULL;
-  }
-  // return the previous pointer without changing current position
-  virtual MemPointer* peek_prev() const {
-    assert(false, "not implemented");
-    return NULL;
-  }
-
-  // remove the pointer at current position
-  virtual void remove() {
-    assert(false, "read-only iterator");
-  };
-  // insert the pointer at current position
-  virtual bool insert(MemPointer* ptr) {
-    assert(false, "read-only iterator");
-    return false;
-  }
-
-  virtual bool insert_after(MemPointer* ptr) {
-    assert(false, "read-only iterator");
-    return false;
-  }
- private:
-  // collapse the 'same kind' of records, and return this 'kind' of
-  // record with highest sequence number
-  MemPointer* next_record();
-
-  // Test if the two records are the same kind: the same memory block and allocation
-  // type.
-  inline bool same_kind(const MemPointerRecord* p1, const MemPointerRecord* p2) const {
-    assert(!p1->is_vm_pointer() && !p2->is_vm_pointer(), "malloc pointer only");
-    return (p1->addr() == p2->addr() &&
-      (p1->flags() & MemPointerRecord::tag_masks) ==
-      (p2->flags() & MemPointerRecord::tag_masks));
-  }
-};
-
-
-
-#define DEFAULT_RECORDER_PTR_ARRAY_SIZE 512
-
-class MemRecorder : public CHeapObj<mtNMT|otNMTRecorder> {
-  friend class MemSnapshot;
-  friend class MemTracker;
-  friend class MemTrackWorker;
-  friend class GenerationData;
-
- protected:
-  // the array that holds memory records
-  MemPointerArray*         _pointer_records;
-
- private:
-  // used for linked list
-  MemRecorder*             _next;
-  // an active recorder can only record data for a certain generation
-  unsigned long            _generation;
-
- protected:
-  _NOINLINE_ MemRecorder();
-  ~MemRecorder();
-
-  // record a memory operation
-  bool record(address addr, MEMFLAGS flags, size_t size, jint seq, address caller_pc = 0);
-
-  // linked list support
-  inline void set_next(MemRecorder* rec) {
-    _next = rec;
-  }
-
-  inline MemRecorder* next() const {
-    return _next;
-  }
-
-  // if the recorder is full
-  inline bool is_full() const {
-    assert(_pointer_records != NULL, "just check");
-    return _pointer_records->is_full();
-  }
-
-  // whether the recorder ran out of memory when initializing its
-  // internal data
-  inline bool out_of_memory() const {
-    return (_pointer_records == NULL ||
-      _pointer_records->out_of_memory());
-  }
-
-  inline void clear() {
-    assert(_pointer_records != NULL, "Just check");
-    _pointer_records->clear();
-  }
-
-  SequencedRecordIterator pointer_itr();
-
-  // return the generation to which this recorder belongs
-  unsigned long get_generation() const { return _generation; }
- protected:
-  // number of MemRecorder instance
-  static volatile jint _instance_count;
-
- private:
-  // sorting function, sort records into following order
-  // 1. memory address
-  // 2. allocation type
-  // 3. sequence number
-  static int sort_record_fn(const void* e1, const void* e2);
-
-  debug_only(void check_dup_seq(jint seq) const;)
-  void set_generation();
-};
-
-#endif // SHARE_VM_SERVICES_MEM_RECORDER_HPP
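
The deleted header's FixedSizeMemPointerArray keeps its storage inline in the
object, so appending a record never allocates and therefore never re-enters the
tracker. A compilable sketch of the same shape follows; FixedArray, Entry and
by_addr are illustrative names chosen here, not HotSpot identifiers.

#include <cstdio>
#include <cstdlib>

struct Entry { unsigned long addr; int seq; };

template <class E, int SIZE> class FixedArray {
  E   _data[SIZE];  // inline storage: no heap traffic on append
  int _size;
 public:
  FixedArray() : _size(0) {}
  bool is_full() const { return _size >= SIZE; }
  int  length()  const { return _size; }
  bool append(const E& e) {  // fails instead of growing
    if (is_full()) return false;
    _data[_size++] = e;
    return true;
  }
  const E& at(int i) const { return _data[i]; }
  void sort(int (*fn)(const void*, const void*)) {
    std::qsort(_data, _size, sizeof(E), fn);  // same qsort-over-raw-storage approach
  }
};

static int by_addr(const void* a, const void* b) {
  const Entry* x = (const Entry*)a;
  const Entry* y = (const Entry*)b;
  return (x->addr > y->addr) - (x->addr < y->addr);  // overflow-safe compare
}

int main() {
  FixedArray<Entry, 4> arr;
  arr.append(Entry{0x2000, 2});
  arr.append(Entry{0x1000, 1});
  arr.sort(by_addr);
  for (int i = 0; i < arr.length(); i++)
    std::printf("0x%lx seq=%d\n", arr.at(i).addr, arr.at(i).seq);
  return 0;
}

A failing append() keeps the recorder's footprint fixed; the is_full() checks
above are how callers detect that condition.
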
--- a/hotspot/src/share/vm/services/memReporter.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/services/memReporter.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -22,618 +22,595 @@
  *
  */
 #include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "runtime/os.hpp"
+
+#include "memory/allocation.hpp"
+#include "services/mallocTracker.hpp"
 #include "services/memReporter.hpp"
-#include "services/memPtrArray.hpp"
-#include "services/memTracker.hpp"
+#include "services/virtualMemoryTracker.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+size_t MemReporterBase::reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const {
+  return malloc->malloc_size() + malloc->arena_size() + vm->reserved();
+}
+
+size_t MemReporterBase::committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const {
+  return malloc->malloc_size() + malloc->arena_size() + vm->committed();
+}
 
-PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
+void MemReporterBase::print_total(size_t reserved, size_t committed) const {
+  const char* scale = current_scale();
+  output()->print("reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s",
+    amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
+}
+
+void MemReporterBase::print_malloc(size_t amount, size_t count) const {
+  const char* scale = current_scale();
+  outputStream* out = output();
+  out->print("(malloc=" SIZE_FORMAT "%s",
+    amount_in_current_scale(amount), scale);
+
+  if (count > 0) {
+    out->print(" #" SIZE_FORMAT "", count);
+  }
 
-const char* BaselineOutputer::memory_unit(size_t scale) {
-  switch(scale) {
-    case K: return "KB";
-    case M: return "MB";
-    case G: return "GB";
-  }
-  ShouldNotReachHere();
-  return NULL;
+  out->print(")");
+}
+
+void MemReporterBase::print_virtual_memory(size_t reserved, size_t committed) const {
+  const char* scale = current_scale();
+  output()->print("(mmap: reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s)",
+    amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
+}
+
+void MemReporterBase::print_malloc_line(size_t amount, size_t count) const {
+  output()->print("%28s", " ");
+  print_malloc(amount, count);
+  output()->print_cr(" ");
+}
+
+void MemReporterBase::print_virtual_memory_line(size_t reserved, size_t committed) const {
+  output()->print("%28s", " ");
+  print_virtual_memory(reserved, committed);
+  output()->print_cr(" ");
+}
+
+void MemReporterBase::print_arena_line(size_t amount, size_t count) const {
+  const char* scale = current_scale();
+  output()->print_cr("%27s (arena=" SIZE_FORMAT "%s #" SIZE_FORMAT ")", " ",
+    amount_in_current_scale(amount), scale, count);
+}
+
+void MemReporterBase::print_virtual_memory_region(const char* type, address base, size_t size) const {
+  const char* scale = current_scale();
+  output()->print("[" PTR_FORMAT " - " PTR_FORMAT "] %s " SIZE_FORMAT "%s",
+    p2i(base), p2i(base + size), type, amount_in_current_scale(size), scale);
 }
 
 
-void BaselineReporter::report_baseline(const MemBaseline& baseline, bool summary_only) {
-  assert(MemTracker::is_on(), "Native memory tracking is off");
-  _outputer.start(scale());
-  _outputer.total_usage(
-    amount_in_current_scale(baseline.total_malloc_amount() + baseline.total_reserved_amount()),
-    amount_in_current_scale(baseline.total_malloc_amount() + baseline.total_committed_amount()));
-
-  _outputer.num_of_classes(baseline.number_of_classes());
-  _outputer.num_of_threads(baseline.number_of_threads());
-
-  report_summaries(baseline);
-  if (!summary_only && MemTracker::track_callsite()) {
-    report_virtual_memory_map(baseline);
-    report_callsites(baseline);
-  }
-  _outputer.done();
-}
-
-void BaselineReporter::report_summaries(const MemBaseline& baseline) {
-  _outputer.start_category_summary();
-  MEMFLAGS type;
+void MemSummaryReporter::report() {
+  const char* scale = current_scale();
+  outputStream* out = output();
+  size_t total_reserved_amount = _malloc_snapshot->total() +
+    _vm_snapshot->total_reserved();
+  size_t total_committed_amount = _malloc_snapshot->total() +
+    _vm_snapshot->total_committed();
 
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    type = MemBaseline::MemType2NameMap[index]._flag;
-    _outputer.category_summary(type,
-      amount_in_current_scale(baseline.reserved_amount(type)),
-      amount_in_current_scale(baseline.committed_amount(type)),
-      amount_in_current_scale(baseline.malloc_amount(type)),
-      baseline.malloc_count(type),
-      amount_in_current_scale(baseline.arena_amount(type)),
-      baseline.arena_count(type));
-  }
-
-  _outputer.done_category_summary();
-}
+  // Overall total
+  out->print_cr("\nNative Memory Tracking:\n");
+  out->print("Total: ");
+  print_total(total_reserved_amount, total_committed_amount);
+  out->print("\n");
 
-void BaselineReporter::report_virtual_memory_map(const MemBaseline& baseline) {
-  _outputer.start_virtual_memory_map();
-  MemBaseline* pBL = const_cast<MemBaseline*>(&baseline);
-  MemPointerArrayIteratorImpl itr = MemPointerArrayIteratorImpl(pBL->_vm_map);
-  VMMemRegionEx* rgn = (VMMemRegionEx*)itr.current();
-  while (rgn != NULL) {
-    if (rgn->is_reserved_region()) {
-      _outputer.reserved_memory_region(FLAGS_TO_MEMORY_TYPE(rgn->flags()),
-        rgn->base(), rgn->base() + rgn->size(), amount_in_current_scale(rgn->size()), rgn->pc());
-    } else {
-      _outputer.committed_memory_region(rgn->base(), rgn->base() + rgn->size(),
-        amount_in_current_scale(rgn->size()), rgn->pc());
-    }
-    rgn = (VMMemRegionEx*)itr.next();
+  // Summary by memory type
+  for (int index = 0; index < mt_number_of_types; index ++) {
+    MEMFLAGS flag = NMTUtil::index_to_flag(index);
+    // thread stack is reported as part of thread category
+    if (flag == mtThreadStack) continue;
+    MallocMemory* malloc_memory = _malloc_snapshot->by_type(flag);
+    VirtualMemory* virtual_memory = _vm_snapshot->by_type(flag);
+
+    report_summary_of_type(flag, malloc_memory, virtual_memory);
   }
-
-  _outputer.done_virtual_memory_map();
 }
 
-void BaselineReporter::report_callsites(const MemBaseline& baseline) {
-  _outputer.start_callsite();
-  MemBaseline* pBL = const_cast<MemBaseline*>(&baseline);
+void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
+  MallocMemory*  malloc_memory, VirtualMemory* virtual_memory) {
 
-  pBL->_malloc_cs->sort((FN_SORT)MemBaseline::bl_malloc_sort_by_size);
-  pBL->_vm_cs->sort((FN_SORT)MemBaseline::bl_vm_sort_by_size);
+  size_t reserved_amount  = reserved_total (malloc_memory, virtual_memory);
+  size_t committed_amount = committed_total(malloc_memory, virtual_memory);
 
-  // walk malloc callsites
-  MemPointerArrayIteratorImpl malloc_itr(pBL->_malloc_cs);
-  MallocCallsitePointer*      malloc_callsite =
-                  (MallocCallsitePointer*)malloc_itr.current();
-  while (malloc_callsite != NULL) {
-    _outputer.malloc_callsite(malloc_callsite->addr(),
-        amount_in_current_scale(malloc_callsite->amount()), malloc_callsite->count());
-    malloc_callsite = (MallocCallsitePointer*)malloc_itr.next();
+  // Count thread's native stack in "Thread" category
+  if (flag == mtThread) {
+    const VirtualMemory* thread_stack_usage =
+      (const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
+    reserved_amount  += thread_stack_usage->reserved();
+    committed_amount += thread_stack_usage->committed();
+  } else if (flag == mtNMT) {
+    // Count malloc headers in "NMT" category
+    reserved_amount  += _malloc_snapshot->malloc_overhead()->size();
+    committed_amount += _malloc_snapshot->malloc_overhead()->size();
   }
 
-  // walk virtual memory callsite
-  MemPointerArrayIteratorImpl vm_itr(pBL->_vm_cs);
-  VMCallsitePointer*          vm_callsite = (VMCallsitePointer*)vm_itr.current();
-  while (vm_callsite != NULL) {
-    _outputer.virtual_memory_callsite(vm_callsite->addr(),
-      amount_in_current_scale(vm_callsite->reserved_amount()),
-      amount_in_current_scale(vm_callsite->committed_amount()));
-    vm_callsite = (VMCallsitePointer*)vm_itr.next();
-  }
-  pBL->_malloc_cs->sort((FN_SORT)MemBaseline::bl_malloc_sort_by_pc);
-  pBL->_vm_cs->sort((FN_SORT)MemBaseline::bl_vm_sort_by_pc);
-  _outputer.done_callsite();
-}
+  if (amount_in_current_scale(reserved_amount) > 0) {
+    outputStream* out   = output();
+    const char*   scale = current_scale();
+    out->print("-%26s (", NMTUtil::flag_to_name(flag));
+    print_total(reserved_amount, committed_amount);
+    out->print_cr(")");
 
-void BaselineReporter::diff_baselines(const MemBaseline& cur, const MemBaseline& prev,
-  bool summary_only) {
-  assert(MemTracker::is_on(), "Native memory tracking is off");
-  _outputer.start(scale());
-  size_t total_reserved = cur.total_malloc_amount() + cur.total_reserved_amount();
-  size_t total_committed = cur.total_malloc_amount() + cur.total_committed_amount();
-
-  _outputer.diff_total_usage(
-    amount_in_current_scale(total_reserved), amount_in_current_scale(total_committed),
-    diff_in_current_scale(total_reserved,  (prev.total_malloc_amount() + prev.total_reserved_amount())),
-    diff_in_current_scale(total_committed, (prev.total_committed_amount() + prev.total_malloc_amount())));
+    if (flag == mtClass) {
+      // report class count
+      out->print_cr("%27s (classes #" SIZE_FORMAT ")", " ", _class_count);
+    } else if (flag == mtThread) {
+      // report thread count
+      out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", _malloc_snapshot->thread_count());
+      const VirtualMemory* thread_stack_usage =
+       _vm_snapshot->by_type(mtThreadStack);
+      out->print("%27s (stack: ", " ");
+      print_total(thread_stack_usage->reserved(), thread_stack_usage->committed());
+      out->print_cr(")");
+    }
 
-  _outputer.diff_num_of_classes(cur.number_of_classes(),
-       diff(cur.number_of_classes(), prev.number_of_classes()));
-  _outputer.diff_num_of_threads(cur.number_of_threads(),
-       diff(cur.number_of_threads(), prev.number_of_threads()));
+    // report malloc'd memory
+    if (amount_in_current_scale(malloc_memory->malloc_size()) > 0) {
+      // We don't know how many arena chunks are in use, so don't report the count
+      size_t count = (flag == mtChunk) ? 0 : malloc_memory->malloc_count();
+      print_malloc_line(malloc_memory->malloc_size(), count);
+    }
 
-  diff_summaries(cur, prev);
-  if (!summary_only && MemTracker::track_callsite()) {
-    diff_callsites(cur, prev);
-  }
-  _outputer.done();
-}
-
-void BaselineReporter::diff_summaries(const MemBaseline& cur, const MemBaseline& prev) {
-  _outputer.start_category_summary();
-  MEMFLAGS type;
+    if (amount_in_current_scale(virtual_memory->reserved()) > 0) {
+      print_virtual_memory_line(virtual_memory->reserved(), virtual_memory->committed());
+    }
 
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    type = MemBaseline::MemType2NameMap[index]._flag;
-    _outputer.diff_category_summary(type,
-      amount_in_current_scale(cur.reserved_amount(type)),
-      amount_in_current_scale(cur.committed_amount(type)),
-      amount_in_current_scale(cur.malloc_amount(type)),
-      cur.malloc_count(type),
-      amount_in_current_scale(cur.arena_amount(type)),
-      cur.arena_count(type),
-      diff_in_current_scale(cur.reserved_amount(type), prev.reserved_amount(type)),
-      diff_in_current_scale(cur.committed_amount(type), prev.committed_amount(type)),
-      diff_in_current_scale(cur.malloc_amount(type), prev.malloc_amount(type)),
-      diff(cur.malloc_count(type), prev.malloc_count(type)),
-      diff_in_current_scale(cur.arena_amount(type), prev.arena_amount(type)),
-      diff(cur.arena_count(type), prev.arena_count(type)));
+    if (amount_in_current_scale(malloc_memory->arena_size()) > 0) {
+      print_arena_line(malloc_memory->arena_size(), malloc_memory->arena_count());
+    }
+
+    if (flag == mtNMT &&
+      amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()) > 0) {
+      out->print_cr("%27s (tracking overhead=" SIZE_FORMAT "%s)", " ",
+        amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()), scale);
+    }
+
+    out->print_cr(" ");
   }
-
-  _outputer.done_category_summary();
 }
 
-void BaselineReporter::diff_callsites(const MemBaseline& cur, const MemBaseline& prev) {
-  _outputer.start_callsite();
-  MemBaseline* pBL_cur = const_cast<MemBaseline*>(&cur);
-  MemBaseline* pBL_prev = const_cast<MemBaseline*>(&prev);
+void MemDetailReporter::report_detail() {
+  // Start detail report
+  outputStream* out = output();
+  out->print_cr("Details:\n");
+
+  report_malloc_sites();
+  report_virtual_memory_allocation_sites();
+}
+
+void MemDetailReporter::report_malloc_sites() {
+  MallocSiteIterator         malloc_itr = _baseline.malloc_sites(MemBaseline::by_size);
+  if (malloc_itr.is_empty()) return;
+
+  outputStream* out = output();
 
-  // walk malloc callsites
-  MemPointerArrayIteratorImpl cur_malloc_itr(pBL_cur->_malloc_cs);
-  MemPointerArrayIteratorImpl prev_malloc_itr(pBL_prev->_malloc_cs);
+  const MallocSite* malloc_site;
+  while ((malloc_site = malloc_itr.next()) != NULL) {
+    // Don't report if size is too small
+    if (amount_in_current_scale(malloc_site->size()) == 0)
+      continue;
 
-  MallocCallsitePointer*      cur_malloc_callsite =
-                  (MallocCallsitePointer*)cur_malloc_itr.current();
-  MallocCallsitePointer*      prev_malloc_callsite =
-                  (MallocCallsitePointer*)prev_malloc_itr.current();
+    const NativeCallStack* stack = malloc_site->call_stack();
+    stack->print_on(out);
+    out->print("%29s", " ");
+    print_malloc(malloc_site->size(), malloc_site->count());
+    out->print_cr("\n");
+  }
+}
+
+void MemDetailReporter::report_virtual_memory_allocation_sites()  {
+  VirtualMemorySiteIterator  virtual_memory_itr =
+    _baseline.virtual_memory_sites(MemBaseline::by_size);
+
+  if (virtual_memory_itr.is_empty()) return;
+
+  outputStream* out = output();
+  const VirtualMemoryAllocationSite*  virtual_memory_site;
 
-  while (cur_malloc_callsite != NULL || prev_malloc_callsite != NULL) {
-    if (prev_malloc_callsite == NULL) {
-      assert(cur_malloc_callsite != NULL, "sanity check");
-      // this is a new callsite
-      _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
-        amount_in_current_scale(cur_malloc_callsite->amount()),
-        cur_malloc_callsite->count(),
-        diff_in_current_scale(cur_malloc_callsite->amount(), 0),
-        diff(cur_malloc_callsite->count(), 0));
-      cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
-    } else if (cur_malloc_callsite == NULL) {
-      assert(prev_malloc_callsite != NULL, "Sanity check");
-      // this callsite is already gone
-      _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(),
-        0, 0,
-        diff_in_current_scale(0, prev_malloc_callsite->amount()),
-        diff(0, prev_malloc_callsite->count()));
-      prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
-    } else {
-      assert(cur_malloc_callsite  != NULL,  "Sanity check");
-      assert(prev_malloc_callsite != NULL,  "Sanity check");
-      if (cur_malloc_callsite->addr() < prev_malloc_callsite->addr()) {
-        // this is a new callsite
-        _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
-          amount_in_current_scale(cur_malloc_callsite->amount()),
-          cur_malloc_callsite->count(),
-          diff_in_current_scale(cur_malloc_callsite->amount(), 0),
-          diff(cur_malloc_callsite->count(), 0));
-          cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
-      } else if (cur_malloc_callsite->addr() > prev_malloc_callsite->addr()) {
-        // this callsite is already gone
-        _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(),
-          0, 0,
-          diff_in_current_scale(0, prev_malloc_callsite->amount()),
-          diff(0, prev_malloc_callsite->count()));
-        prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
-      } else {
-        // the same callsite
-        _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
-          amount_in_current_scale(cur_malloc_callsite->amount()),
-          cur_malloc_callsite->count(),
-          diff_in_current_scale(cur_malloc_callsite->amount(), prev_malloc_callsite->amount()),
-          diff(cur_malloc_callsite->count(), prev_malloc_callsite->count()));
-        cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
-        prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
-      }
-    }
+  while ((virtual_memory_site = virtual_memory_itr.next()) != NULL) {
+    // Don't report if size is too small
+    if (amount_in_current_scale(virtual_memory_site->reserved()) == 0)
+      continue;
+
+    const NativeCallStack* stack = virtual_memory_site->call_stack();
+    stack->print_on(out);
+    out->print("%28s (", " ");
+    print_total(virtual_memory_site->reserved(), virtual_memory_site->committed());
+    out->print_cr(")\n");
+  }
+}
+
+
+void MemDetailReporter::report_virtual_memory_map() {
+  // Virtual memory map always in base address order
+  VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations();
+  const ReservedMemoryRegion* rgn;
+
+  output()->print_cr("Virtual memory map:");
+  while ((rgn = itr.next()) != NULL) {
+    report_virtual_memory_region(rgn);
+  }
+}
+
+void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) {
+  assert(reserved_rgn != NULL, "NULL pointer");
+
+  // Don't report if size is too small
+  if (amount_in_current_scale(reserved_rgn->size()) == 0) return;
+
+  outputStream* out = output();
+  const char* scale = current_scale();
+  const NativeCallStack*  stack = reserved_rgn->call_stack();
+  bool all_committed = reserved_rgn->all_committed();
+  const char* region_type = (all_committed ? "reserved and committed" : "reserved");
+  out->print_cr(" ");
+  print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size());
+  out->print(" for %s", NMTUtil::flag_to_name(reserved_rgn->flag()));
+  if (stack->is_empty()) {
+    out->print_cr(" ");
+  } else {
+    out->print_cr(" from");
+    stack->print_on(out, 4);
   }
 
-  // walk virtual memory callsite
-  MemPointerArrayIteratorImpl cur_vm_itr(pBL_cur->_vm_cs);
-  MemPointerArrayIteratorImpl prev_vm_itr(pBL_prev->_vm_cs);
-  VMCallsitePointer*          cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.current();
-  VMCallsitePointer*          prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.current();
-  while (cur_vm_callsite != NULL || prev_vm_callsite != NULL) {
-    if (prev_vm_callsite == NULL || cur_vm_callsite->addr() < prev_vm_callsite->addr()) {
-      // this is a new callsite
-      _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(),
-        amount_in_current_scale(cur_vm_callsite->reserved_amount()),
-        amount_in_current_scale(cur_vm_callsite->committed_amount()),
-        diff_in_current_scale(cur_vm_callsite->reserved_amount(), 0),
-        diff_in_current_scale(cur_vm_callsite->committed_amount(), 0));
-      cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.next();
-    } else if (cur_vm_callsite == NULL || cur_vm_callsite->addr() > prev_vm_callsite->addr()) {
-      // this callsite is already gone
-      _outputer.diff_virtual_memory_callsite(prev_vm_callsite->addr(),
-        amount_in_current_scale(0),
-        amount_in_current_scale(0),
-        diff_in_current_scale(0, prev_vm_callsite->reserved_amount()),
-        diff_in_current_scale(0, prev_vm_callsite->committed_amount()));
-      prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next();
-    } else { // the same callsite
-      _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(),
-        amount_in_current_scale(cur_vm_callsite->reserved_amount()),
-        amount_in_current_scale(cur_vm_callsite->committed_amount()),
-        diff_in_current_scale(cur_vm_callsite->reserved_amount(), prev_vm_callsite->reserved_amount()),
-        diff_in_current_scale(cur_vm_callsite->committed_amount(), prev_vm_callsite->committed_amount()));
-      cur_vm_callsite  = (VMCallsitePointer*)cur_vm_itr.next();
-      prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next();
-    }
-  }
-
-  _outputer.done_callsite();
-}
-
-size_t BaselineReporter::amount_in_current_scale(size_t amt) const {
-  return (size_t)(((float)amt/(float)_scale) + 0.5);
-}
-
-int BaselineReporter::diff_in_current_scale(size_t value1, size_t value2) const {
-  return (int)(((float)value1 - (float)value2)/((float)_scale) + 0.5);
-}
-
-int BaselineReporter::diff(size_t value1, size_t value2) const {
-  return ((int)value1 - (int)value2);
-}
-
-void BaselineTTYOutputer::start(size_t scale, bool report_diff) {
-  _scale = scale;
-  _output->print_cr(" ");
-  _output->print_cr("Native Memory Tracking:");
-  _output->print_cr(" ");
-}
-
-void BaselineTTYOutputer::done() {
-
-}
+  if (all_committed) return;
 
-void BaselineTTYOutputer::total_usage(size_t total_reserved, size_t total_committed) {
-  const char* unit = memory_unit(_scale);
-  _output->print_cr("Total:  reserved=%d%s,  committed=%d%s",
-    total_reserved, unit, total_committed, unit);
-}
-
-void BaselineTTYOutputer::start_category_summary() {
-  _output->print_cr(" ");
-}
-
-/**
- * report a summary of memory type
- */
-void BaselineTTYOutputer::category_summary(MEMFLAGS type,
-  size_t reserved_amt, size_t committed_amt, size_t malloc_amt,
-  size_t malloc_count, size_t arena_amt, size_t arena_count) {
-
-  // we report mtThreadStack under mtThread category
-  if (type == mtThreadStack) {
-    assert(malloc_amt == 0 && malloc_count == 0 && arena_amt == 0,
-      "Just check");
-    _thread_stack_reserved = reserved_amt;
-    _thread_stack_committed = committed_amt;
-  } else {
-    const char* unit = memory_unit(_scale);
-    size_t total_reserved = (reserved_amt + malloc_amt + arena_amt);
-    size_t total_committed = (committed_amt + malloc_amt + arena_amt);
-    if (type == mtThread) {
-      total_reserved += _thread_stack_reserved;
-      total_committed += _thread_stack_committed;
-    }
-
-    if (total_reserved > 0) {
-      _output->print_cr("-%26s (reserved=%d%s, committed=%d%s)",
-        MemBaseline::type2name(type), total_reserved, unit,
-        total_committed, unit);
-
-      if (type == mtClass) {
-        _output->print_cr("%27s (classes #%d)", " ", _num_of_classes);
-      } else if (type == mtThread) {
-        _output->print_cr("%27s (thread #%d)", " ", _num_of_threads);
-        _output->print_cr("%27s (stack: reserved=%d%s, committed=%d%s)", " ",
-          _thread_stack_reserved, unit, _thread_stack_committed, unit);
-      }
-
-      if (malloc_amt > 0) {
-        if (type != mtChunk) {
-          _output->print_cr("%27s (malloc=%d%s, #%d)", " ", malloc_amt, unit,
-            malloc_count);
-        } else {
-          _output->print_cr("%27s (malloc=%d%s)", " ", malloc_amt, unit);
-        }
-      }
-
-      if (reserved_amt > 0) {
-        _output->print_cr("%27s (mmap: reserved=%d%s, committed=%d%s)",
-          " ", reserved_amt, unit, committed_amt, unit);
-      }
-
-      if (arena_amt > 0) {
-        _output->print_cr("%27s (arena=%d%s, #%d)", " ", arena_amt, unit, arena_count);
-      }
-
-      _output->print_cr(" ");
+  CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
+  const CommittedMemoryRegion* committed_rgn;
+  while ((committed_rgn = itr.next()) != NULL) {
+    // Don't report if size is too small
+    if (amount_in_current_scale(committed_rgn->size()) == 0) continue;
+    stack = committed_rgn->call_stack();
+    out->print("\n\t");
+    print_virtual_memory_region("committed", committed_rgn->base(), committed_rgn->size());
+    if (stack->is_empty()) {
+      out->print_cr(" ");
+    } else {
+      out->print_cr(" from");
+      stack->print_on(out, 12);
     }
   }
 }
 
-void BaselineTTYOutputer::done_category_summary() {
-  _output->print_cr(" ");
-}
+void MemSummaryDiffReporter::report_diff() {
+  const char* scale = current_scale();
+  outputStream* out = output();
+  out->print_cr("\nNative Memory Tracking:\n");
 
-
-void BaselineTTYOutputer::start_virtual_memory_map() {
-  _output->print_cr("Virtual memory map:");
-}
+  // Overall diff
+  out->print("Total: ");
+  print_virtual_memory_diff(_current_baseline.total_reserved_memory(),
+    _current_baseline.total_committed_memory(), _early_baseline.total_reserved_memory(),
+    _early_baseline.total_committed_memory());
 
-void BaselineTTYOutputer::reserved_memory_region(MEMFLAGS type, address base, address end,
-                                                 size_t size, address pc) {
-  const char* unit = memory_unit(_scale);
-  char buf[128];
-  int  offset;
-  _output->print_cr(" ");
-  _output->print_cr("[" PTR_FORMAT " - " PTR_FORMAT "] reserved %d%s for %s", base, end, size, unit,
-            MemBaseline::type2name(type));
-  if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
-      _output->print_cr("\t\tfrom [%s+0x%x]", buf, offset);
-  }
-}
+  out->print_cr("\n");
 
-void BaselineTTYOutputer::committed_memory_region(address base, address end, size_t size, address pc) {
-  const char* unit = memory_unit(_scale);
-  char buf[128];
-  int  offset;
-  _output->print("\t[" PTR_FORMAT " - " PTR_FORMAT "] committed %d%s", base, end, size, unit);
-  if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
-      _output->print_cr(" from [%s+0x%x]", buf, offset);
+  // Summary diff by memory type
+  for (int index = 0; index < mt_number_of_types; index ++) {
+    MEMFLAGS flag = NMTUtil::index_to_flag(index);
+    // thread stack is reported as part of thread category
+    if (flag == mtThreadStack) continue;
+    diff_summary_of_type(flag, _early_baseline.malloc_memory(flag),
+      _early_baseline.virtual_memory(flag), _current_baseline.malloc_memory(flag),
+      _current_baseline.virtual_memory(flag));
   }
 }
 
-void BaselineTTYOutputer::done_virtual_memory_map() {
-  _output->print_cr(" ");
-}
-
-
-
-void BaselineTTYOutputer::start_callsite() {
-  _output->print_cr("Details:");
-  _output->print_cr(" ");
-}
-
-void BaselineTTYOutputer::done_callsite() {
-  _output->print_cr(" ");
-}
+void MemSummaryDiffReporter::print_malloc_diff(size_t current_amount, size_t current_count,
+    size_t early_amount, size_t early_count) const {
+  const char* scale = current_scale();
+  outputStream* out = output();
 
-void BaselineTTYOutputer::malloc_callsite(address pc, size_t malloc_amt,
-  size_t malloc_count) {
-  if (malloc_amt > 0) {
-    const char* unit = memory_unit(_scale);
-    char buf[128];
-    int  offset;
-    if (pc == 0) {
-      _output->print("[BOOTSTRAP]%18s", " ");
-    } else if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
-      _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
-      _output->print("%28s", " ");
-    } else {
-      _output->print("[" PTR_FORMAT "]%18s", pc, " ");
+  out->print("malloc=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale);
+  long amount_diff = diff_in_current_scale(current_amount, early_amount);
+  if (amount_diff != 0) {
+    out->print(" %+ld%s", amount_diff, scale);
+  }
+  if (current_count > 0) {
+    out->print(" #" SIZE_FORMAT "", current_count);
+    if (current_count != early_count) {
+      out->print(" %+d", (int)(current_count - early_count));
     }
-
-    _output->print_cr("(malloc=%d%s #%d)", malloc_amt, unit, malloc_count);
-    _output->print_cr(" ");
   }
 }
 
-void BaselineTTYOutputer::virtual_memory_callsite(address pc, size_t reserved_amt,
-  size_t committed_amt) {
-  if (reserved_amt > 0) {
-    const char* unit = memory_unit(_scale);
-    char buf[128];
-    int  offset;
-    if (pc == 0) {
-      _output->print("[BOOTSTRAP]%18s", " ");
-    } else if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
-      _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
-      _output->print("%28s", " ");
-    } else {
-      _output->print("[" PTR_FORMAT "]%18s", pc, " ");
-    }
+void MemSummaryDiffReporter::print_arena_diff(size_t current_amount, size_t current_count,
+  size_t early_amount, size_t early_count) const {
+  const char* scale = current_scale();
+  outputStream* out = output();
+  out->print("arena=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale);
+  if (diff_in_current_scale(current_amount, early_amount) != 0) {
+    out->print(" %+ld", diff_in_current_scale(current_amount, early_amount));
+  }
+
+  out->print(" #" SIZE_FORMAT "", current_count);
+  if (current_count != early_count) {
+    out->print(" %+d", (int)(current_count - early_count));
+  }
+}
 
-    _output->print_cr("(mmap: reserved=%d%s, committed=%d%s)",
-      reserved_amt, unit, committed_amt, unit);
-    _output->print_cr(" ");
+void MemSummaryDiffReporter::print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
+    size_t early_reserved, size_t early_committed) const {
+  const char* scale = current_scale();
+  outputStream* out = output();
+  out->print("reserved=" SIZE_FORMAT "%s", amount_in_current_scale(current_reserved), scale);
+  long reserved_diff = diff_in_current_scale(current_reserved, early_reserved);
+  if (reserved_diff != 0) {
+    out->print(" %+ld%s", reserved_diff, scale);
+  }
+
+  out->print(", committed=" SIZE_FORMAT "%s", amount_in_current_scale(current_committed), scale);
+  long committed_diff = diff_in_current_scale(current_committed, early_committed);
+  if (committed_diff != 0) {
+    out->print(" %+ld%s", committed_diff, scale);
   }
 }
 
-void BaselineTTYOutputer::diff_total_usage(size_t total_reserved,
-  size_t total_committed, int reserved_diff, int committed_diff) {
-  const char* unit = memory_unit(_scale);
-  _output->print_cr("Total:  reserved=%d%s  %+d%s, committed=%d%s %+d%s",
-    total_reserved, unit, reserved_diff, unit, total_committed, unit,
-    committed_diff, unit);
-}
+
+void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag, const MallocMemory* early_malloc,
+  const VirtualMemory* early_vm, const MallocMemory* current_malloc,
+  const VirtualMemory* current_vm) const {
+
+  outputStream* out = output();
+  const char* scale = current_scale();
+
+  // Total reserved and committed memory in current baseline
+  size_t current_reserved_amount  = reserved_total (current_malloc, current_vm);
+  size_t current_committed_amount = committed_total(current_malloc, current_vm);
+
+  // Total reserved and committed memory in early baseline
+  size_t early_reserved_amount  = reserved_total(early_malloc, early_vm);
+  size_t early_committed_amount = committed_total(early_malloc, early_vm);
 
-void BaselineTTYOutputer::diff_category_summary(MEMFLAGS type,
-  size_t cur_reserved_amt, size_t cur_committed_amt,
-  size_t cur_malloc_amt, size_t cur_malloc_count,
-  size_t cur_arena_amt, size_t cur_arena_count,
-  int reserved_diff, int committed_diff, int malloc_diff,
-  int malloc_count_diff, int arena_diff, int arena_count_diff) {
+  // Adjust virtual memory total
+  if (flag == mtThread) {
+    const VirtualMemory* early_thread_stack_usage =
+      _early_baseline.virtual_memory(mtThreadStack);
+    const VirtualMemory* current_thread_stack_usage =
+      _current_baseline.virtual_memory(mtThreadStack);
+
+    early_reserved_amount  += early_thread_stack_usage->reserved();
+    early_committed_amount += early_thread_stack_usage->committed();
+
+    current_reserved_amount  += current_thread_stack_usage->reserved();
+    current_committed_amount += current_thread_stack_usage->committed();
+  } else if (flag == mtNMT) {
+    early_reserved_amount  += _early_baseline.malloc_tracking_overhead();
+    early_committed_amount += _early_baseline.malloc_tracking_overhead();
+
+    current_reserved_amount  += _current_baseline.malloc_tracking_overhead();
+    current_committed_amount += _current_baseline.malloc_tracking_overhead();
+  }
 
-  if (type == mtThreadStack) {
-    assert(cur_malloc_amt == 0 && cur_malloc_count == 0 &&
-      cur_arena_amt == 0, "Just check");
-    _thread_stack_reserved = cur_reserved_amt;
-    _thread_stack_committed = cur_committed_amt;
-    _thread_stack_reserved_diff = reserved_diff;
-    _thread_stack_committed_diff = committed_diff;
-  } else {
-    const char* unit = memory_unit(_scale);
-    size_t total_reserved = (cur_reserved_amt + cur_malloc_amt + cur_arena_amt);
-    // nothing to report in this category
-    if (total_reserved == 0) {
-      return;
-    }
-    int    diff_reserved = (reserved_diff + malloc_diff + arena_diff);
+  if (amount_in_current_scale(current_reserved_amount) > 0 ||
+      diff_in_current_scale(current_reserved_amount, early_reserved_amount) != 0) {
+
+    // print summary line
+    out->print("-%26s (", NMTUtil::flag_to_name(flag));
+    print_virtual_memory_diff(current_reserved_amount, current_committed_amount,
+      early_reserved_amount, early_committed_amount);
+    out->print_cr(")");
 
-    // category summary
-    _output->print("-%26s (reserved=%d%s", MemBaseline::type2name(type),
-      total_reserved, unit);
+    // detail lines
+    if (flag == mtClass) {
+      // report class count
+      out->print("%27s (classes #" SIZE_FORMAT "", " ", _current_baseline.class_count());
+      int class_count_diff = (int)(_current_baseline.class_count() -
+        _early_baseline.class_count());
+      if (class_count_diff != 0) {
+        out->print(" %+d", class_count_diff);
+      }
+      out->print_cr(")");
+    } else if (flag == mtThread) {
+      // report thread count
+      out->print("%27s (thread #" SIZE_FORMAT "", " ", _current_baseline.thread_count());
+      int thread_count_diff = (int)(_current_baseline.thread_count() -
+          _early_baseline.thread_count());
+      if (thread_count_diff != 0) {
+        out->print(" %+d", thread_count_diff);
+      }
+      out->print_cr(")");
 
-    if (diff_reserved != 0) {
-      _output->print(" %+d%s", diff_reserved, unit);
-    }
+      // report thread stack
+      const VirtualMemory* current_thread_stack =
+          _current_baseline.virtual_memory(mtThreadStack);
+      const VirtualMemory* early_thread_stack =
+        _early_baseline.virtual_memory(mtThreadStack);
 
-    size_t total_committed = cur_committed_amt + cur_malloc_amt + cur_arena_amt;
-    _output->print(", committed=%d%s", total_committed, unit);
-
-    int total_committed_diff = committed_diff + malloc_diff + arena_diff;
-    if (total_committed_diff != 0) {
-      _output->print(" %+d%s", total_committed_diff, unit);
+      out->print("%27s (stack: ", " ");
+      print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(),
+        early_thread_stack->reserved(), early_thread_stack->committed());
+      out->print_cr(")");
     }
 
-    _output->print_cr(")");
+    // Report malloc'd memory
+    size_t current_malloc_amount = current_malloc->malloc_size();
+    size_t early_malloc_amount   = early_malloc->malloc_size();
+    if (amount_in_current_scale(current_malloc_amount) > 0 ||
+        diff_in_current_scale(current_malloc_amount, early_malloc_amount) != 0) {
+      out->print("%28s(", " ");
+      print_malloc_diff(current_malloc_amount, (flag == mtChunk) ? 0 : current_malloc->malloc_count(),
+        early_malloc_amount, early_malloc->malloc_count());
+      out->print_cr(")");
+    }
 
-    // special cases
-    if (type == mtClass) {
-      _output->print("%27s (classes #%d", " ", _num_of_classes);
-      if (_num_of_classes_diff != 0) {
-        _output->print(" %+d", _num_of_classes_diff);
-      }
-      _output->print_cr(")");
-    } else if (type == mtThread) {
-      // thread count
-      _output->print("%27s (thread #%d", " ", _num_of_threads);
-      if (_num_of_threads_diff != 0) {
-        _output->print_cr(" %+d)", _num_of_threads_diff);
-      } else {
-        _output->print_cr(")");
-      }
-      _output->print("%27s (stack: reserved=%d%s", " ", _thread_stack_reserved, unit);
-      if (_thread_stack_reserved_diff != 0) {
-        _output->print(" %+d%s", _thread_stack_reserved_diff, unit);
-      }
-
-      _output->print(", committed=%d%s", _thread_stack_committed, unit);
-      if (_thread_stack_committed_diff != 0) {
-        _output->print(" %+d%s",_thread_stack_committed_diff, unit);
-      }
-
-      _output->print_cr(")");
+    // Report virtual memory
+    if (amount_in_current_scale(current_vm->reserved()) > 0 ||
+        diff_in_current_scale(current_vm->reserved(), early_vm->reserved()) != 0) {
+      out->print("%27s (mmap: ", " ");
+      print_virtual_memory_diff(current_vm->reserved(), current_vm->committed(),
+        early_vm->reserved(), early_vm->committed());
+      out->print_cr(")");
     }
 
-    // malloc'd memory
-    if (cur_malloc_amt > 0) {
-      _output->print("%27s (malloc=%d%s", " ", cur_malloc_amt, unit);
-      if (malloc_diff != 0) {
-        _output->print(" %+d%s", malloc_diff, unit);
-      }
-      if (type != mtChunk) {
-        _output->print(", #%d", cur_malloc_count);
-        if (malloc_count_diff) {
-          _output->print(" %+d", malloc_count_diff);
-        }
-      }
-      _output->print_cr(")");
+    // Report arena memory
+    if (amount_in_current_scale(current_malloc->arena_size()) > 0 ||
+        diff_in_current_scale(current_malloc->arena_size(), early_malloc->arena_size()) != 0) {
+      out->print("%28s(", " ");
+      print_arena_diff(current_malloc->arena_size(), current_malloc->arena_count(),
+        early_malloc->arena_size(), early_malloc->arena_count());
+      out->print_cr(")");
     }
 
-    // mmap'd memory
-    if (cur_reserved_amt > 0) {
-      _output->print("%27s (mmap: reserved=%d%s", " ", cur_reserved_amt, unit);
-      if (reserved_diff != 0) {
-        _output->print(" %+d%s", reserved_diff, unit);
-      }
+    // Report native memory tracking overhead
+    if (flag == mtNMT) {
+      size_t current_tracking_overhead = amount_in_current_scale(_current_baseline.malloc_tracking_overhead());
 
-      _output->print(", committed=%d%s", cur_committed_amt, unit);
-      if (committed_diff != 0) {
-        _output->print(" %+d%s", committed_diff, unit);
-      }
-      _output->print_cr(")");
-    }
+      out->print("%27s (tracking overhead=" SIZE_FORMAT "%s", " ",
+        current_tracking_overhead, scale);
 
-    // arena memory
-    if (cur_arena_amt > 0) {
-      _output->print("%27s (arena=%d%s", " ", cur_arena_amt, unit);
-      if (arena_diff != 0) {
-        _output->print(" %+d%s", arena_diff, unit);
+      long overhead_diff = diff_in_current_scale(_current_baseline.malloc_tracking_overhead(),
+           _early_baseline.malloc_tracking_overhead());
+      if (overhead_diff != 0) {
+        out->print(" %+ld%s", overhead_diff, scale);
       }
-      _output->print(", #%d", cur_arena_count);
-      if (arena_count_diff != 0) {
-        _output->print(" %+d", arena_count_diff);
-      }
-      _output->print_cr(")");
+      out->print_cr(")");
     }
-
-    _output->print_cr(" ");
+    out->print_cr(" ");
   }
 }
 
-void BaselineTTYOutputer::diff_malloc_callsite(address pc,
-    size_t cur_malloc_amt, size_t cur_malloc_count,
-    int malloc_diff, int malloc_count_diff) {
-  if (malloc_diff != 0) {
-    const char* unit = memory_unit(_scale);
-    char buf[128];
-    int  offset;
-    if (pc == 0) {
-      _output->print_cr("[BOOTSTRAP]%18s", " ");
+void MemDetailDiffReporter::report_diff() {
+  MemSummaryDiffReporter::report_diff();
+  diff_malloc_sites();
+  diff_virtual_memory_sites();
+}
+
+void MemDetailDiffReporter::diff_malloc_sites() const {
+  MallocSiteIterator early_itr = _early_baseline.malloc_sites(MemBaseline::by_site);
+  MallocSiteIterator current_itr = _current_baseline.malloc_sites(MemBaseline::by_site);
+
+  const MallocSite* early_site   = early_itr.next();
+  const MallocSite* current_site = current_itr.next();
+
+  while (early_site != NULL || current_site != NULL) {
+    if (early_site == NULL) {
+      new_malloc_site(current_site);
+      current_site = current_itr.next();
+    } else if (current_site == NULL) {
+      old_malloc_site(early_site);
+      early_site = early_itr.next();
     } else {
-      if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
-        _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
-        _output->print("%28s", " ");
+      int compVal = current_site->call_stack()->compare(*early_site->call_stack());
+      if (compVal < 0) {
+        new_malloc_site(current_site);
+        current_site = current_itr.next();
+      } else if (compVal > 0) {
+        old_malloc_site(early_site);
+        early_site = early_itr.next();
       } else {
-        _output->print("[" PTR_FORMAT "]%18s", pc, " ");
+        diff_malloc_site(early_site, current_site);
+        early_site   = early_itr.next();
+        current_site = current_itr.next();
       }
     }
+  }
+}
 
-    _output->print("(malloc=%d%s", cur_malloc_amt, unit);
-    if (malloc_diff != 0) {
-      _output->print(" %+d%s", malloc_diff, unit);
+void MemDetailDiffReporter::diff_virtual_memory_sites() const {
+  VirtualMemorySiteIterator early_itr = _early_baseline.virtual_memory_sites(MemBaseline::by_site);
+  VirtualMemorySiteIterator current_itr = _current_baseline.virtual_memory_sites(MemBaseline::by_site);
+
+  const VirtualMemoryAllocationSite* early_site   = early_itr.next();
+  const VirtualMemoryAllocationSite* current_site = current_itr.next();
+
+  while (early_site != NULL || current_site != NULL) {
+    if (early_site == NULL) {
+      new_virtual_memory_site(current_site);
+      current_site = current_itr.next();
+    } else if (current_site == NULL) {
+      old_virtual_memory_site(early_site);
+      early_site = early_itr.next();
+    } else {
+      int compVal = current_site->call_stack()->compare(*early_site->call_stack());
+      if (compVal < 0) {
+        new_virtual_memory_site(current_site);
+        current_site = current_itr.next();
+      } else if (compVal > 0) {
+        old_virtual_memory_site(early_site);
+        early_site = early_itr.next();
+      } else {
+        diff_virtual_memory_site(early_site, current_site);
+        early_site   = early_itr.next();
+        current_site = current_itr.next();
+      }
     }
-    _output->print(", #%d", cur_malloc_count);
-    if (malloc_count_diff != 0) {
-      _output->print(" %+d", malloc_count_diff);
-    }
-    _output->print_cr(")");
-    _output->print_cr(" ");
   }
 }
 
-void BaselineTTYOutputer::diff_virtual_memory_callsite(address pc,
-    size_t cur_reserved_amt, size_t cur_committed_amt,
-    int reserved_diff, int committed_diff) {
-  if (reserved_diff != 0 || committed_diff != 0) {
-    const char* unit = memory_unit(_scale);
-    char buf[64];
-    int  offset;
-    if (pc == 0) {
-      _output->print_cr("[BOOSTRAP]%18s", " ");
-    } else {
-      if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
-        _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
-        _output->print("%28s", " ");
-      } else {
-        _output->print("[" PTR_FORMAT "]%18s", pc, " ");
-      }
-    }
+
+void MemDetailDiffReporter::new_malloc_site(const MallocSite* malloc_site) const {
+  diff_malloc_site(malloc_site->call_stack(), malloc_site->size(), malloc_site->count(),
+    0, 0);
+}
+
+void MemDetailDiffReporter::old_malloc_site(const MallocSite* malloc_site) const {
+  diff_malloc_site(malloc_site->call_stack(), 0, 0, malloc_site->size(),
+    malloc_site->count());
+}
+
+void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early,
+  const MallocSite* current)  const {
+  diff_malloc_site(current->call_stack(), current->size(), current->count(),
+    early->size(), early->count());
+}
+
+void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_t current_size,
+  size_t current_count, size_t early_size, size_t early_count) const {
+  outputStream* out = output();
+
+  assert(stack != NULL, "NULL stack");
+
+  if (diff_in_current_scale(current_size, early_size) == 0) {
+    return;
+  }
+
+  stack->print_on(out);
+  out->print("%28s (", " ");
+  print_malloc_diff(current_size, current_count,
+    early_size, early_count);
 
-    _output->print("(mmap: reserved=%d%s", cur_reserved_amt, unit);
-    if (reserved_diff != 0) {
-      _output->print(" %+d%s", reserved_diff, unit);
-    }
-    _output->print(", committed=%d%s", cur_committed_amt, unit);
-    if (committed_diff != 0) {
-      _output->print(" %+d%s", committed_diff, unit);
-    }
-    _output->print_cr(")");
-    _output->print_cr(" ");
+  out->print_cr(")\n");
+}
+
+
+void MemDetailDiffReporter::new_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
+  diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0);
+}
+
+void MemDetailDiffReporter::old_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
+  diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed());
+}
+
+void MemDetailDiffReporter::diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
+  const VirtualMemoryAllocationSite* current) const {
+  diff_virtual_memory_site(current->call_stack(), current->reserved(), current->committed(),
+    early->reserved(), early->committed());
+}
+
+void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
+  size_t current_committed, size_t early_reserved, size_t early_committed) const  {
+  outputStream* out = output();
+
+  // no change
+  if (diff_in_current_scale(current_reserved, early_reserved) == 0 &&
+      diff_in_current_scale(current_committed, early_committed) == 0) {
+    return;
   }
-}
+
+  stack->print_on(out);
+  out->print("%28s (mmap: ", " ");
+  print_virtual_memory_diff(current_reserved, current_committed,
+    early_reserved, early_committed);
+
+  out->print_cr(")\n");
+}
+
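
Both diff reporters above round byte deltas through diff_in_current_scale,
which adds half of the scale before dividing (or subtracts it for negative
deltas), i.e. round-half-away-from-zero in units of the reporting scale. A
standalone sketch of that rule, assuming K = 1024 as in the HotSpot sources;
diff_in_scale is a hypothetical free-function version of the member shown in
the header below.

#include <cstddef>
#include <cstdio>

long diff_in_scale(size_t s1, size_t s2, size_t scale_bytes) {
  // Unsigned subtraction wraps for s1 < s2; the cast to long recovers the
  // negative delta, matching the member function in MemReporterBase.
  long amount = (long)(s1 - s2);
  long scale  = (long)scale_bytes;
  amount = (amount > 0) ? (amount + scale / 2) : (amount - scale / 2);
  return amount / scale;
}

int main() {
  const size_t K = 1024;
  std::printf("%+ldKB\n", diff_in_scale(1536, 0, K));  // +2KB: 1.5KB rounds up
  std::printf("%+ldKB\n", diff_in_scale(0, 1536, K));  // -2KB: symmetric rounding
  std::printf("%+ldKB\n", diff_in_scale(100, 0, K));   // +0KB: below half a unit
  return 0;
}

A delta smaller than half a unit scales to zero, which is why the detail diff
reporters skip call sites whose scaled diff is zero.
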
--- a/hotspot/src/share/vm/services/memReporter.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/services/memReporter.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,262 +25,217 @@
 #ifndef SHARE_VM_SERVICES_MEM_REPORTER_HPP
 #define SHARE_VM_SERVICES_MEM_REPORTER_HPP
 
-#include "runtime/mutexLocker.hpp"
+#if INCLUDE_NMT
+
+#include "oops/instanceKlass.hpp"
 #include "services/memBaseline.hpp"
-#include "services/memTracker.hpp"
-#include "utilities/ostream.hpp"
-#include "utilities/macros.hpp"
-
-#if INCLUDE_NMT
+#include "services/nmtCommon.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/virtualMemoryTracker.hpp"
 
 /*
- * MemBaselineReporter reports data to this outputer class,
- * ReportOutputer is responsible for format, store and redirect
- * the data to the final destination.
- */
-class BaselineOutputer : public StackObj {
+ * Base class that provides shared helpers for the NMT reporters
+ */
+class MemReporterBase : public StackObj {
+ private:
+  size_t        _scale;  // report in this scale
+  outputStream* _output; // destination
+
  public:
-  // start to report memory usage in specified scale.
-  // if report_diff = true, the reporter reports baseline comparison
-  // information.
-
-  virtual void start(size_t scale, bool report_diff = false) = 0;
-  // Done reporting
-  virtual void done() = 0;
+  MemReporterBase(outputStream* out = NULL, size_t scale = K)
+    : _scale(scale) {
+    _output = (out == NULL) ? tty : out;
+  }
 
-  /* report baseline summary information */
-  virtual void total_usage(size_t total_reserved,
-                           size_t total_committed) = 0;
-  virtual void num_of_classes(size_t classes) = 0;
-  virtual void num_of_threads(size_t threads) = 0;
-
-  virtual void thread_info(size_t stack_reserved_amt, size_t stack_committed_amt) = 0;
+ protected:
+  inline outputStream* output() const {
+    return _output;
+  }
+  // Current reporting scale
+  inline const char* current_scale() const {
+    return NMTUtil::scale_name(_scale);
+  }
+  // Convert memory amount in bytes to current reporting scale
+  inline size_t amount_in_current_scale(size_t amount) const {
+    return NMTUtil::amount_in_scale(amount, _scale);
+  }
 
-  /* report baseline summary comparison */
-  virtual void diff_total_usage(size_t total_reserved,
-                                size_t total_committed,
-                                int reserved_diff,
-                                int committed_diff) = 0;
-  virtual void diff_num_of_classes(size_t classes, int diff) = 0;
-  virtual void diff_num_of_threads(size_t threads, int diff) = 0;
+  // Convert diff amount in bytes to current reporting scale
+  inline long diff_in_current_scale(size_t s1, size_t s2) const {
+    long amount = (long)(s1 - s2);
+    long scale = (long)_scale;
+    amount = (amount > 0) ? (amount + scale / 2) : (amount - scale / 2);
+    return amount / scale;
+  }
 
-  virtual void diff_thread_info(size_t stack_reserved, size_t stack_committed,
-        int stack_reserved_diff, int stack_committed_diff) = 0;
+  // Helper functions
+  // Calculate total reserved and committed amount
+  size_t reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const;
+  size_t committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const;
 
 
-  /*
-   * memory summary by memory types.
-   * for each memory type, following summaries are reported:
-   *  - reserved amount, committed amount
-   *  - malloc'd amount, malloc count
-   *  - arena amount, arena count
-   */
+  // Print summary total, malloc and virtual memory
+  void print_total(size_t reserved, size_t committed) const;
+  void print_malloc(size_t amount, size_t count) const;
+  void print_virtual_memory(size_t reserved, size_t committed) const;
 
-  // start reporting memory summary by memory type
-  virtual void start_category_summary() = 0;
+  void print_malloc_line(size_t amount, size_t count) const;
+  void print_virtual_memory_line(size_t reserved, size_t committed) const;
+  void print_arena_line(size_t amount, size_t count) const;
 
-  virtual void category_summary(MEMFLAGS type, size_t reserved_amt,
-                                size_t committed_amt,
-                                size_t malloc_amt, size_t malloc_count,
-                                size_t arena_amt, size_t arena_count) = 0;
+  void print_virtual_memory_region(const char* type, address base, size_t size) const;
+};
 
-  virtual void diff_category_summary(MEMFLAGS type, size_t cur_reserved_amt,
-                                size_t cur_committed_amt,
-                                size_t cur_malloc_amt, size_t cur_malloc_count,
-                                size_t cur_arena_amt, size_t cur_arena_count,
-                                int reserved_diff, int committed_diff, int malloc_diff,
-                                int malloc_count_diff, int arena_diff,
-                                int arena_count_diff) = 0;
+/*
+ * This class generates the summary tracking report.
+ */
+class MemSummaryReporter : public MemReporterBase {
+ private:
+  MallocMemorySnapshot*   _malloc_snapshot;
+  VirtualMemorySnapshot*  _vm_snapshot;
+  size_t                  _class_count;
 
-  virtual void done_category_summary() = 0;
-
-  virtual void start_virtual_memory_map() = 0;
-  virtual void reserved_memory_region(MEMFLAGS type, address base, address end, size_t size, address pc) = 0;
-  virtual void committed_memory_region(address base, address end, size_t size, address pc) = 0;
-  virtual void done_virtual_memory_map() = 0;
+ public:
+  // Report summary tracking data from global snapshots directly.
+  // This constructor is used for final reporting and hs_err reporting.
+  MemSummaryReporter(MallocMemorySnapshot* malloc_snapshot,
+    VirtualMemorySnapshot* vm_snapshot, outputStream* output,
+    size_t class_count = 0, size_t scale = K) :
+    MemReporterBase(output, scale),
+    _malloc_snapshot(malloc_snapshot),
+    _vm_snapshot(vm_snapshot) {
+    if (class_count == 0) {
+      _class_count = InstanceKlass::number_of_instance_classes();
+    } else {
+      _class_count = class_count;
+    }
+  }
+  // This constructor is for normal reporting from a recent baseline.
+  MemSummaryReporter(MemBaseline& baseline, outputStream* output,
+    size_t scale = K) : MemReporterBase(output, scale),
+    _malloc_snapshot(baseline.malloc_memory_snapshot()),
+    _vm_snapshot(baseline.virtual_memory_snapshot()),
+    _class_count(baseline.class_count()) { }
 
-  /*
-   *  Report callsite information
-   */
-  virtual void start_callsite() = 0;
-  virtual void malloc_callsite(address pc, size_t malloc_amt, size_t malloc_count) = 0;
-  virtual void virtual_memory_callsite(address pc, size_t reserved_amt, size_t committed_amt) = 0;
 
-  virtual void diff_malloc_callsite(address pc, size_t cur_malloc_amt, size_t cur_malloc_count,
-              int malloc_diff, int malloc_count_diff) = 0;
-  virtual void diff_virtual_memory_callsite(address pc, size_t cur_reserved_amt, size_t cur_committed_amt,
-              int reserved_diff, int committed_diff) = 0;
-
-  virtual void done_callsite() = 0;
-
-  // return current scale in "KB", "MB" or "GB"
-  static const char* memory_unit(size_t scale);
+  // Generate summary report
+  virtual void report();
+ private:
+  // Report summary for each memory type
+  void report_summary_of_type(MEMFLAGS type, MallocMemory* malloc_memory,
+    VirtualMemory* virtual_memory);
 };
 
 /*
- * This class reports processed data from a baseline or
- * the changes between the two baseline.
+ * This class generates the detail tracking report.
  */
-class BaselineReporter : public StackObj {
+class MemDetailReporter : public MemSummaryReporter {
  private:
-  BaselineOutputer&  _outputer;
-  size_t             _scale;
+  MemBaseline&   _baseline;
 
  public:
-  // construct a reporter that reports memory usage
-  // in specified scale
-  BaselineReporter(BaselineOutputer& outputer, size_t scale = K):
-    _outputer(outputer) {
-    _scale = scale;
+  MemDetailReporter(MemBaseline& baseline, outputStream* output, size_t scale = K) :
+    MemSummaryReporter(baseline, output, scale),
+     _baseline(baseline) { }
+
+  // Generate detail report.
+  // The report contains summary and detail sections.
+  virtual void report() {
+    MemSummaryReporter::report();
+    report_virtual_memory_map();
+    report_detail();
   }
-  virtual void report_baseline(const MemBaseline& baseline, bool summary_only = false);
-  virtual void diff_baselines(const MemBaseline& cur, const MemBaseline& prev,
-                              bool summary_only = false);
-
-  void set_scale(size_t scale);
-  size_t scale() const { return _scale; }
 
  private:
-  void report_summaries(const MemBaseline& baseline);
-  void report_virtual_memory_map(const MemBaseline& baseline);
-  void report_callsites(const MemBaseline& baseline);
-
-  void diff_summaries(const MemBaseline& cur, const MemBaseline& prev);
-  void diff_callsites(const MemBaseline& cur, const MemBaseline& prev);
+  // Report detail tracking data.
+  void report_detail();
+  // Report virtual memory map
+  void report_virtual_memory_map();
+  // Report malloc allocation sites
+  void report_malloc_sites();
+  // Report virtual memory reservation sites
+  void report_virtual_memory_allocation_sites();
 
-  // calculate memory size in current memory scale
-  size_t amount_in_current_scale(size_t amt) const;
-  // diff two unsigned values in current memory scale
-  int    diff_in_current_scale(size_t value1, size_t value2) const;
-  // diff two unsigned value
-  int    diff(size_t value1, size_t value2) const;
+  // Report a virtual memory region
+  void report_virtual_memory_region(const ReservedMemoryRegion* rgn);
 };
 
 /*
- * tty output implementation. Native memory tracking
- * DCmd uses this outputer.
+ * This class generates the summary comparison report.
+ * It compares the current memory baseline against an earlier baseline.
  */
-class BaselineTTYOutputer : public BaselineOutputer {
- private:
-  size_t         _scale;
-
-  size_t         _num_of_classes;
-  size_t         _num_of_threads;
-  size_t         _thread_stack_reserved;
-  size_t         _thread_stack_committed;
-
-  int            _num_of_classes_diff;
-  int            _num_of_threads_diff;
-  int            _thread_stack_reserved_diff;
-  int            _thread_stack_committed_diff;
-
-  outputStream*  _output;
+class MemSummaryDiffReporter : public MemReporterBase {
+ protected:
+  MemBaseline&      _early_baseline;
+  MemBaseline&      _current_baseline;
 
  public:
-  BaselineTTYOutputer(outputStream* st) {
-    _scale = K;
-    _num_of_classes = 0;
-    _num_of_threads = 0;
-    _thread_stack_reserved = 0;
-    _thread_stack_committed = 0;
-    _num_of_classes_diff = 0;
-    _num_of_threads_diff = 0;
-    _thread_stack_reserved_diff = 0;
-    _thread_stack_committed_diff = 0;
-    _output = st;
-  }
-
-  // begin reporting memory usage in specified scale
-  void start(size_t scale, bool report_diff = false);
-  // done reporting
-  void done();
-
-  // total memory usage
-  void total_usage(size_t total_reserved,
-                   size_t total_committed);
-  // report total loaded classes
-  void num_of_classes(size_t classes) {
-    _num_of_classes = classes;
-  }
-
-  void num_of_threads(size_t threads) {
-    _num_of_threads = threads;
-  }
-
-  void thread_info(size_t stack_reserved_amt, size_t stack_committed_amt) {
-    _thread_stack_reserved = stack_reserved_amt;
-    _thread_stack_committed = stack_committed_amt;
+  MemSummaryDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline,
+    outputStream* output, size_t scale = K) : MemReporterBase(output, scale),
+    _early_baseline(early_baseline), _current_baseline(current_baseline) {
+    assert(early_baseline.baseline_type()   != MemBaseline::Not_baselined, "Not baselined");
+    assert(current_baseline.baseline_type() != MemBaseline::Not_baselined, "Not baselined");
   }
 
-  void diff_total_usage(size_t total_reserved,
-                        size_t total_committed,
-                        int reserved_diff,
-                        int committed_diff);
-
-  void diff_num_of_classes(size_t classes, int diff) {
-    _num_of_classes = classes;
-    _num_of_classes_diff = diff;
-  }
-
-  void diff_num_of_threads(size_t threads, int diff) {
-    _num_of_threads = threads;
-    _num_of_threads_diff = diff;
-  }
-
-  void diff_thread_info(size_t stack_reserved_amt, size_t stack_committed_amt,
-               int stack_reserved_diff, int stack_committed_diff) {
-    _thread_stack_reserved = stack_reserved_amt;
-    _thread_stack_committed = stack_committed_amt;
-    _thread_stack_reserved_diff = stack_reserved_diff;
-    _thread_stack_committed_diff = stack_committed_diff;
-  }
+  // Generate summary comparison report
+  virtual void report_diff();
 
-  /*
-   * Report memory summary categoriuzed by memory types.
-   * For each memory type, following summaries are reported:
-   *  - reserved amount, committed amount
-   *  - malloc-ed amount, malloc count
-   *  - arena amount, arena count
-   */
-  // start reporting memory summary by memory type
-  void start_category_summary();
-  void category_summary(MEMFLAGS type, size_t reserved_amt, size_t committed_amt,
-                               size_t malloc_amt, size_t malloc_count,
-                               size_t arena_amt, size_t arena_count);
-
-  void diff_category_summary(MEMFLAGS type, size_t cur_reserved_amt,
-                          size_t cur_committed_amt,
-                          size_t cur_malloc_amt, size_t cur_malloc_count,
-                          size_t cur_arena_amt, size_t cur_arena_count,
-                          int reserved_diff, int committed_diff, int malloc_diff,
-                          int malloc_count_diff, int arena_diff,
-                          int arena_count_diff);
+ private:
+  // Report the comparison for each memory type
+  void diff_summary_of_type(MEMFLAGS type,
+    const MallocMemory* early_malloc, const VirtualMemory* early_vm,
+    const MallocMemory* current_malloc, const VirtualMemory* current_vm) const;
 
-  void done_category_summary();
-
-  // virtual memory map
-  void start_virtual_memory_map();
-  void reserved_memory_region(MEMFLAGS type, address base, address end, size_t size, address pc);
-  void committed_memory_region(address base, address end, size_t size, address pc);
-  void done_virtual_memory_map();
-
-
-  /*
-   *  Report callsite information
-   */
-  void start_callsite();
-  void malloc_callsite(address pc, size_t malloc_amt, size_t malloc_count);
-  void virtual_memory_callsite(address pc, size_t reserved_amt, size_t committed_amt);
-
-  void diff_malloc_callsite(address pc, size_t cur_malloc_amt, size_t cur_malloc_count,
-              int malloc_diff, int malloc_count_diff);
-  void diff_virtual_memory_callsite(address pc, size_t cur_reserved_amt, size_t cur_committed_amt,
-              int reserved_diff, int committed_diff);
-
-  void done_callsite();
+ protected:
+  void print_malloc_diff(size_t current_amount, size_t current_count,
+    size_t early_amount, size_t early_count) const;
+  void print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
+    size_t early_reserved, size_t early_committed) const;
+  void print_arena_diff(size_t current_amount, size_t current_count,
+    size_t early_amount, size_t early_count) const;
 };
 
+/*
+ * This class generates the detail comparison report.
+ * It compares the current memory baseline against an earlier baseline;
+ * both baselines have to be detail baselines.
+ */
+class MemDetailDiffReporter : public MemSummaryDiffReporter {
+ public:
+  MemDetailDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline,
+    outputStream* output, size_t scale = K) :
+    MemSummaryDiffReporter(early_baseline, current_baseline, output, scale) { }
+
+  // Generate detail comparison report
+  virtual void report_diff();
+
+  // Malloc allocation site comparison
+  void diff_malloc_sites() const;
+  // Virtual memory reservation site comparison
+  void diff_virtual_memory_sites() const;
+
+  // Malloc allocation site that is new in the current baseline
+  void new_malloc_site(const MallocSite* site) const;
+  // Malloc allocation site that is no longer present in the current baseline
+  void old_malloc_site(const MallocSite* site) const;
+  // Compare a malloc allocation site that appears in both baselines
+  void diff_malloc_site(const MallocSite* early, const MallocSite* current) const;
+
+  // Virtual memory allocation site that is new in the current baseline
+  void new_virtual_memory_site(const VirtualMemoryAllocationSite* callsite) const;
+  // Virtual memory allocation site that is no longer present in the current baseline
+  void old_virtual_memory_site(const VirtualMemoryAllocationSite* callsite) const;
+  // Compare a virtual memory allocation site that appears in both baselines
+  void diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
+                                const VirtualMemoryAllocationSite* current) const;
+
+  void diff_malloc_site(const NativeCallStack* stack, size_t current_size,
+    size_t current_count, size_t early_size, size_t early_count) const;
+  void diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
+    size_t current_committed, size_t early_reserved, size_t early_committed) const;
+};
 
 #endif // INCLUDE_NMT
 
-#endif // SHARE_VM_SERVICES_MEM_REPORTER_HPP
+#endif // SHARE_VM_SERVICES_MEM_REPORTER_HPP
+
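For context, MemReporterBase::diff_in_current_scale (declared above) rounds the
signed byte delta to the nearest unit of the reporting scale before dividing.
A standalone sketch of that rounding, with the arithmetic copied from the
inline definition above:

    #include <cstdio>
    #include <cstddef>

    // Round a signed byte delta to the nearest unit of scale_bytes, matching
    // the arithmetic in MemReporterBase::diff_in_current_scale above.
    long diff_in_scale(size_t s1, size_t s2, size_t scale_bytes) {
      long amount = (long)(s1 - s2);
      long scale  = (long)scale_bytes;
      // bias by half a unit toward the sign, then truncate toward zero
      amount = (amount > 0) ? (amount + scale / 2) : (amount - scale / 2);
      return amount / scale;
    }

    int main() {
      printf("%ld\n", diff_in_scale(4096, 2560, 1024)); // +1536B -> +2 (KB)
      printf("%ld\n", diff_in_scale(2048, 2548, 1024)); // -500B  ->  0 (KB)
      return 0;
    }
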
--- a/hotspot/src/share/vm/services/memSnapshot.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,748 +0,0 @@
-/*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "utilities/decoder.hpp"
-#include "services/memBaseline.hpp"
-#include "services/memPtr.hpp"
-#include "services/memPtrArray.hpp"
-#include "services/memSnapshot.hpp"
-#include "services/memTracker.hpp"
-
-PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
-
-#ifdef ASSERT
-
-void decode_pointer_record(MemPointerRecord* rec) {
-  tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT  "] size = %d bytes", rec->addr(),
-    rec->addr() + rec->size(), (int)rec->size());
-  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
-  if (rec->is_vm_pointer()) {
-    if (rec->is_allocation_record()) {
-      tty->print_cr(" (reserve)");
-    } else if (rec->is_commit_record()) {
-      tty->print_cr(" (commit)");
-    } else if (rec->is_uncommit_record()) {
-      tty->print_cr(" (uncommit)");
-    } else if (rec->is_deallocation_record()) {
-      tty->print_cr(" (release)");
-    } else {
-      tty->print_cr(" (tag)");
-    }
-  } else {
-    if (rec->is_arena_memory_record()) {
-      tty->print_cr(" (arena size)");
-    } else if (rec->is_allocation_record()) {
-      tty->print_cr(" (malloc)");
-    } else {
-      tty->print_cr(" (free)");
-    }
-  }
-  if (MemTracker::track_callsite()) {
-    char buf[1024];
-    address pc = ((MemPointerRecordEx*)rec)->pc();
-    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
-      tty->print_cr("\tfrom %s", buf);
-    } else {
-      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
-    }
-  }
-}
-
-void decode_vm_region_record(VMMemRegion* rec) {
-  tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
-    rec->addr() + rec->size());
-  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
-  if (rec->is_allocation_record()) {
-    tty->print_cr(" (reserved)");
-  } else if (rec->is_commit_record()) {
-    tty->print_cr(" (committed)");
-  } else {
-    ShouldNotReachHere();
-  }
-  if (MemTracker::track_callsite()) {
-    char buf[1024];
-    address pc = ((VMMemRegionEx*)rec)->pc();
-    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
-      tty->print_cr("\tfrom %s", buf);
-    } else {
-      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
-    }
-
-  }
-}
-
-#endif
-
-
-bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
-  VMMemRegionEx new_rec;
-  assert(rec->is_allocation_record() || rec->is_commit_record(),
-    "Sanity check");
-  if (MemTracker::track_callsite()) {
-    new_rec.init((MemPointerRecordEx*)rec);
-  } else {
-    new_rec.init(rec);
-  }
-  return insert(&new_rec);
-}
-
-bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
-  VMMemRegionEx new_rec;
-  assert(rec->is_allocation_record() || rec->is_commit_record(),
-    "Sanity check");
-  if (MemTracker::track_callsite()) {
-    new_rec.init((MemPointerRecordEx*)rec);
-  } else {
-    new_rec.init(rec);
-  }
-  return insert_after(&new_rec);
-}
-
-// we don't consolidate reserved regions, since they may be categorized
-// in different types.
-bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
-  assert(rec->is_allocation_record(), "Sanity check");
-  VMMemRegion* reserved_region = (VMMemRegion*)current();
-
-  // we don't have anything yet
-  if (reserved_region == NULL) {
-    return insert_record(rec);
-  }
-
-  assert(reserved_region->is_reserved_region(), "Sanity check");
-  // duplicated records
-  if (reserved_region->is_same_region(rec)) {
-    return true;
-  }
-  // Overlapping stack regions indicate that a JNI thread failed to
-  // detach from the VM before exiting. This leaks the JavaThread object.
-  if (CheckJNICalls)  {
-      guarantee(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) != mtThreadStack ||
-         !reserved_region->overlaps_region(rec),
-         "Attached JNI thread exited without being detached");
-  }
-  // otherwise, we should not have overlapping reserved regions
-  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
-    reserved_region->base() > rec->addr(), "Just check: locate()");
-  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
-    !reserved_region->overlaps_region(rec), "overlapping reserved regions");
-
-  return insert_record(rec);
-}
-
-// we do consolidate committed regions
-bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
-  assert(rec->is_commit_record(), "Sanity check");
-  VMMemRegion* reserved_rgn = (VMMemRegion*)current();
-  assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
-    "Sanity check");
-
-  // thread's native stack is always marked as "committed", ignore
-  // the "commit" operation for creating stack guard pages
-  if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
-      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
-    return true;
-  }
-
-  // if the reserved region has any committed regions
-  VMMemRegion* committed_rgn  = (VMMemRegion*)next();
-  while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
-    // duplicated commit records
-    if(committed_rgn->contains_region(rec)) {
-      return true;
-    } else if (committed_rgn->overlaps_region(rec)) {
-      // overlaps front part
-      if (rec->addr() < committed_rgn->addr()) {
-        committed_rgn->expand_region(rec->addr(),
-          committed_rgn->addr() - rec->addr());
-      } else {
-        // overlaps tail part
-        address committed_rgn_end = committed_rgn->addr() +
-              committed_rgn->size();
-        assert(committed_rgn_end < rec->addr() + rec->size(),
-             "overlap tail part");
-        committed_rgn->expand_region(committed_rgn_end,
-          (rec->addr() + rec->size()) - committed_rgn_end);
-      }
-    } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
-      // adjunct each other
-      committed_rgn->expand_region(rec->addr(), rec->size());
-      VMMemRegion* next_reg = (VMMemRegion*)next();
-      // see if we can consolidate next committed region
-      if (next_reg != NULL && next_reg->is_committed_region() &&
-        next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
-          committed_rgn->expand_region(next_reg->base(), next_reg->size());
-          // delete merged region
-          remove();
-      }
-      return true;
-    } else if (committed_rgn->base() > rec->addr()) {
-      // found the location, insert this committed region
-      return insert_record(rec);
-    }
-    committed_rgn = (VMMemRegion*)next();
-  }
-  return insert_record(rec);
-}
-
-bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
-  assert(rec->is_uncommit_record(), "sanity check");
-  VMMemRegion* cur;
-  cur = (VMMemRegion*)current();
-  assert(cur->is_reserved_region() && cur->contains_region(rec),
-    "Sanity check");
-  // thread's native stack is always marked as "committed", ignore
-  // the "commit" operation for creating stack guard pages
-  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
-      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
-    return true;
-  }
-
-  cur = (VMMemRegion*)next();
-  while (cur != NULL && cur->is_committed_region()) {
-    // region already uncommitted, must be due to duplicated record
-    if (cur->addr() >= rec->addr() + rec->size()) {
-      break;
-    } else if (cur->contains_region(rec)) {
-      // uncommit whole region
-      if (cur->is_same_region(rec)) {
-        remove();
-        break;
-      } else if (rec->addr() == cur->addr() ||
-        rec->addr() + rec->size() == cur->addr() + cur->size()) {
-        // uncommitted from either end of current memory region.
-        cur->exclude_region(rec->addr(), rec->size());
-        break;
-      } else { // split the committed region and release the middle
-        address high_addr = cur->addr() + cur->size();
-        size_t sz = high_addr - rec->addr();
-        cur->exclude_region(rec->addr(), sz);
-        sz = high_addr - (rec->addr() + rec->size());
-        if (MemTracker::track_callsite()) {
-          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
-             ((VMMemRegionEx*)cur)->pc());
-          return insert_record_after(&tmp);
-        } else {
-          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
-          return insert_record_after(&tmp);
-        }
-      }
-    }
-    cur = (VMMemRegion*)next();
-  }
-
-  // we may not find committed record due to duplicated records
-  return true;
-}
-
-bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
-  assert(rec->is_deallocation_record(), "Sanity check");
-  VMMemRegion* cur = (VMMemRegion*)current();
-  assert(cur->is_reserved_region() && cur->contains_region(rec),
-    "Sanity check");
-  if (rec->is_same_region(cur)) {
-
-    // In snapshot, the virtual memory records are sorted in following orders:
-    // 1. virtual memory's base address
-    // 2. virtual memory reservation record, followed by commit records within this reservation.
-    //    The commit records are also in base address order.
-    // When a reserved region is released, we want to remove the reservation record and all
-    // commit records following it.
-#ifdef ASSERT
-    address low_addr = cur->addr();
-    address high_addr = low_addr + cur->size();
-#endif
-    // remove virtual memory reservation record
-    remove();
-    // remove committed regions within above reservation
-    VMMemRegion* next_region = (VMMemRegion*)current();
-    while (next_region != NULL && next_region->is_committed_region()) {
-      assert(next_region->addr() >= low_addr &&
-             next_region->addr() + next_region->size() <= high_addr,
-            "Range check");
-      remove();
-      next_region = (VMMemRegion*)current();
-    }
-  } else if (rec->addr() == cur->addr() ||
-    rec->addr() + rec->size() == cur->addr() + cur->size()) {
-    // released region is at either end of this region
-    cur->exclude_region(rec->addr(), rec->size());
-    assert(check_reserved_region(), "Integrity check");
-  } else { // split the reserved region and release the middle
-    address high_addr = cur->addr() + cur->size();
-    size_t sz = high_addr - rec->addr();
-    cur->exclude_region(rec->addr(), sz);
-    sz = high_addr - rec->addr() - rec->size();
-    if (MemTracker::track_callsite()) {
-      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
-        ((VMMemRegionEx*)cur)->pc());
-      bool ret = insert_reserved_region(&tmp);
-      assert(!ret || check_reserved_region(), "Integrity check");
-      return ret;
-    } else {
-      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
-      bool ret = insert_reserved_region(&tmp);
-      assert(!ret || check_reserved_region(), "Integrity check");
-      return ret;
-    }
-  }
-  return true;
-}
-
-bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
-  // skip all 'commit' records associated with previous reserved region
-  VMMemRegion* p = (VMMemRegion*)next();
-  while (p != NULL && p->is_committed_region() &&
-         p->base() + p->size() < rec->addr()) {
-    p = (VMMemRegion*)next();
-  }
-  return insert_record(rec);
-}
-
-bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
-  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
-  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
-  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
-    size_t sz = rgn->size() - new_rgn_size;
-    // the original region becomes 'new' region
-    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
-     // remaining becomes next region
-    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
-    return insert_reserved_region(&next_rgn);
-  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
-    rgn->exclude_region(new_rgn_addr, new_rgn_size);
-    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
-    return insert_reserved_region(&next_rgn);
-  } else {
-    // the orginal region will be split into three
-    address rgn_high_addr = rgn->base() + rgn->size();
-    // first region
-    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
-    // the second region is the new region
-    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
-    if (!insert_reserved_region(&new_rgn)) return false;
-    // the remaining region
-    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
-      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
-    return insert_reserved_region(&rem_rgn);
-  }
-}
-
-static int sort_in_seq_order(const void* p1, const void* p2) {
-  assert(p1 != NULL && p2 != NULL, "Sanity check");
-  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
-  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
-  return (mp1->seq() - mp2->seq());
-}
-
-bool StagingArea::init() {
-  if (MemTracker::track_callsite()) {
-    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
-    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
-  } else {
-    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
-    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
-  }
-
-  if (_malloc_data != NULL && _vm_data != NULL &&
-      !_malloc_data->out_of_memory() &&
-      !_vm_data->out_of_memory()) {
-    return true;
-  } else {
-    if (_malloc_data != NULL) delete _malloc_data;
-    if (_vm_data != NULL) delete _vm_data;
-    _malloc_data = NULL;
-    _vm_data = NULL;
-    return false;
-  }
-}
-
-
-VMRecordIterator StagingArea::virtual_memory_record_walker() {
-  MemPointerArray* arr = vm_data();
-  // sort into seq number order
-  arr->sort((FN_SORT)sort_in_seq_order);
-  return VMRecordIterator(arr);
-}
-
-
-MemSnapshot::MemSnapshot() {
-  if (MemTracker::track_callsite()) {
-    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
-    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegionEx>(64, true);
-  } else {
-    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
-    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegion>(64, true);
-  }
-
-  _staging_area.init();
-  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
-  NOT_PRODUCT(_untracked_count = 0;)
-  _number_of_classes = 0;
-}
-
-MemSnapshot::~MemSnapshot() {
-  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
-  {
-    MutexLockerEx locker(_lock);
-    if (_alloc_ptrs != NULL) {
-      delete _alloc_ptrs;
-      _alloc_ptrs = NULL;
-    }
-
-    if (_vm_ptrs != NULL) {
-      delete _vm_ptrs;
-      _vm_ptrs = NULL;
-    }
-  }
-
-  if (_lock != NULL) {
-    delete _lock;
-    _lock = NULL;
-  }
-}
-
-
-void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
-  assert(dest != NULL && src != NULL, "Just check");
-  assert(dest->addr() == src->addr(), "Just check");
-  assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");
-
-  if (MemTracker::track_callsite()) {
-    *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
-  } else {
-    *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
-  }
-}
-
-void MemSnapshot::assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src) {
-  assert(src != NULL && dest != NULL, "Just check");
-  assert(dest->seq() == 0 && src->seq() >0, "cast away sequence");
-
-  if (MemTracker::track_callsite()) {
-    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
-  } else {
-    *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
-  }
-}
-
-// merge a recorder to the staging area
-bool MemSnapshot::merge(MemRecorder* rec) {
-  assert(rec != NULL && !rec->out_of_memory(), "Just check");
-
-  SequencedRecordIterator itr(rec->pointer_itr());
-
-  MutexLockerEx lock(_lock, true);
-  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
-  MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
-  MemPointerRecord* matched_rec;
-
-  while (incoming_rec != NULL) {
-    if (incoming_rec->is_vm_pointer()) {
-      // we don't do anything with virtual memory records during merge
-      if (!_staging_area.vm_data()->append(incoming_rec)) {
-        return false;
-      }
-    } else {
-      // locate matched record and/or also position the iterator to proper
-      // location for this incoming record.
-      matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
-      // we have not seen this memory block in this generation,
-      // so just add to staging area
-      if (matched_rec == NULL) {
-        if (!malloc_staging_itr.insert(incoming_rec)) {
-          return false;
-        }
-      } else if (incoming_rec->addr() == matched_rec->addr()) {
-        // whoever has higher sequence number wins
-        if (incoming_rec->seq() > matched_rec->seq()) {
-          copy_seq_pointer(matched_rec, incoming_rec);
-        }
-      } else if (incoming_rec->addr() < matched_rec->addr()) {
-        if (!malloc_staging_itr.insert(incoming_rec)) {
-          return false;
-        }
-      } else {
-        ShouldNotReachHere();
-      }
-    }
-    incoming_rec = (MemPointerRecord*)itr.next();
-  }
-  NOT_PRODUCT(void check_staging_data();)
-  return true;
-}
-
-
-// promote data to next generation
-bool MemSnapshot::promote(int number_of_classes) {
-  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
-  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
-         "Just check");
-  MutexLockerEx lock(_lock, true);
-
-  MallocRecordIterator  malloc_itr = _staging_area.malloc_record_walker();
-  bool promoted = false;
-  if (promote_malloc_records(&malloc_itr)) {
-    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
-    if (promote_virtual_memory_records(&vm_itr)) {
-      promoted = true;
-    }
-  }
-
-  NOT_PRODUCT(check_malloc_pointers();)
-  _staging_area.clear();
-  _number_of_classes = number_of_classes;
-  return promoted;
-}
-
-bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
-  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
-  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
-  MemPointerRecord* matched_rec;
-  while (new_rec != NULL) {
-    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
-    // found matched memory block
-    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
-      // snapshot already contains 'live' records
-      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
-             "Sanity check");
-      // update block states
-      if (new_rec->is_allocation_record()) {
-        assign_pointer(matched_rec, new_rec);
-      } else if (new_rec->is_arena_memory_record()) {
-        if (new_rec->size() == 0) {
-          // remove size record once size drops to 0
-          malloc_snapshot_itr.remove();
-        } else {
-          assign_pointer(matched_rec, new_rec);
-        }
-      } else {
-        // a deallocation record
-        assert(new_rec->is_deallocation_record(), "Sanity check");
-        // an arena record can be followed by a size record, we need to remove both
-        if (matched_rec->is_arena_record()) {
-          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
-          if (next != NULL && next->is_arena_memory_record() &&
-              next->is_memory_record_of_arena(matched_rec)) {
-            malloc_snapshot_itr.remove();
-          }
-        }
-        // the memory is deallocated, remove related record(s)
-        malloc_snapshot_itr.remove();
-      }
-    } else {
-      // don't insert size 0 record
-      if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
-        new_rec = NULL;
-      }
-
-      if (new_rec != NULL) {
-        if  (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
-          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
-            if (!malloc_snapshot_itr.insert_after(new_rec)) {
-              return false;
-            }
-          } else {
-            if (!malloc_snapshot_itr.insert(new_rec)) {
-              return false;
-            }
-          }
-        }
-#ifndef PRODUCT
-        else if (!has_allocation_record(new_rec->addr())) {
-          // NMT can not track some startup memory, which is allocated before NMT is on
-          _untracked_count ++;
-        }
-#endif
-      }
-    }
-    new_rec = (MemPointerRecord*)itr->next();
-  }
-  return true;
-}
-
-bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
-  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
-  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
-  VMMemRegion*  reserved_rec;
-  while (new_rec != NULL) {
-    assert(new_rec->is_vm_pointer(), "Sanity check");
-
-    // locate a reserved region that contains the specified address, or
-    // the nearest reserved region has base address just above the specified
-    // address
-    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
-    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
-      // snapshot can only have 'live' records
-      assert(reserved_rec->is_reserved_region(), "Sanity check");
-      if (new_rec->is_allocation_record()) {
-        if (!reserved_rec->is_same_region(new_rec)) {
-          // only deal with split a bigger reserved region into smaller regions.
-          // So far, CDS is the only use case.
-          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
-            return false;
-          }
-        }
-      } else if (new_rec->is_uncommit_record()) {
-        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
-          return false;
-        }
-      } else if (new_rec->is_commit_record()) {
-        // insert or expand existing committed region to cover this
-        // newly committed region
-        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
-          return false;
-        }
-      } else if (new_rec->is_deallocation_record()) {
-        // release part or all memory region
-        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
-          return false;
-        }
-      } else if (new_rec->is_type_tagging_record()) {
-        // tag this reserved virtual memory range to a memory type. Can not re-tag a memory range
-        // to different type.
-        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
-               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
-               "Sanity check");
-        reserved_rec->tag(new_rec->flags());
-    } else {
-        ShouldNotReachHere();
-          }
-        } else {
-      /*
-       * The assertion failure indicates mis-matched virtual memory records. The likely
-       * scenario is, that some virtual memory operations are not going through os::xxxx_memory()
-       * api, which have to be tracked manually. (perfMemory is an example).
-      */
-      assert(new_rec->is_allocation_record(), "Sanity check");
-      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
-            return false;
-          }
-  }
-    new_rec = (MemPointerRecord*)itr->next();
-  }
-  return true;
-}
-
-#ifndef PRODUCT
-void MemSnapshot::print_snapshot_stats(outputStream* st) {
-  st->print_cr("Snapshot:");
-  st->print_cr("\tMalloced: %d/%d [%5.2f%%]  %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
-    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);
-
-  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
-    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);
-
-  st->print_cr("\tMalloc staging Area:     %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
-    _staging_area.malloc_data()->capacity(),
-    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
-    _staging_area.malloc_data()->instance_size()/K);
-
-  st->print_cr("\tVirtual memory staging Area:     %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
-    _staging_area.vm_data()->capacity(),
-    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
-    _staging_area.vm_data()->instance_size()/K);
-
-  st->print_cr("\tUntracked allocation: %d", _untracked_count);
-}
-
-void MemSnapshot::check_malloc_pointers() {
-  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
-  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
-  MemPointerRecord* prev = NULL;
-  while (p != NULL) {
-    if (prev != NULL) {
-      assert(p->addr() >= prev->addr(), "sorting order");
-    }
-    prev = p;
-    p = (MemPointerRecord*)mItr.next();
-  }
-}
-
-bool MemSnapshot::has_allocation_record(address addr) {
-  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
-  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
-  while (cur != NULL) {
-    if (cur->addr() == addr && cur->is_allocation_record()) {
-      return true;
-    }
-    cur = (MemPointerRecord*)itr.next();
-  }
-  return false;
-}
-#endif // PRODUCT
-
-#ifdef ASSERT
-void MemSnapshot::check_staging_data() {
-  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
-  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
-  MemPointerRecord* next = (MemPointerRecord*)itr.next();
-  while (next != NULL) {
-    assert((next->addr() > cur->addr()) ||
-      ((next->flags() & MemPointerRecord::tag_masks) >
-       (cur->flags() & MemPointerRecord::tag_masks)),
-       "sorting order");
-    cur = next;
-    next = (MemPointerRecord*)itr.next();
-  }
-
-  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
-  cur = (MemPointerRecord*)vm_itr.current();
-  while (cur != NULL) {
-    assert(cur->is_vm_pointer(), "virtual memory pointer only");
-    cur = (MemPointerRecord*)vm_itr.next();
-  }
-}
-
-void MemSnapshot::dump_all_vm_pointers() {
-  MemPointerArrayIteratorImpl itr(_vm_ptrs);
-  VMMemRegion* ptr = (VMMemRegion*)itr.current();
-  tty->print_cr("dump virtual memory pointers:");
-  while (ptr != NULL) {
-    if (ptr->is_committed_region()) {
-      tty->print("\t");
-    }
-    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
-      (ptr->addr() + ptr->size()), ptr->flags());
-
-    if (MemTracker::track_callsite()) {
-      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
-      if (ex->pc() != NULL) {
-        char buf[1024];
-        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
-          tty->print_cr("\t%s", buf);
-        } else {
-          tty->cr();
-        }
-      }
-    }
-
-    ptr = (VMMemRegion*)itr.next();
-  }
-  tty->flush();
-}
-#endif // ASSERT
-
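For context, the removed MemSnapshot::merge above resolved collisions between
per-thread recorder entries by sequence number: when two malloc records carried
the same address, the one with the higher sequence number (the later event)
won. A standalone sketch of that rule follows; the SeqRec struct is an
illustrative stand-in, not the removed MemPointerRecord:

    #include <cstdio>
    #include <cstddef>

    // Illustrative stand-in for a sequenced malloc record: only the fields
    // the merge rule needs (address key, global sequence number, size).
    struct SeqRec { size_t addr; int seq; size_t size; };

    // For records with equal addresses, keep whichever carries the higher
    // sequence number, i.e. the event that happened later.
    void merge_rule(SeqRec* staged, const SeqRec* incoming) {
      if (incoming->seq > staged->seq) {
        *staged = *incoming;  // later event supersedes the staged one
      }
    }

    int main() {
      SeqRec staged   = { 0x1000, 5, 64 };
      SeqRec incoming = { 0x1000, 9, 128 };  // same block, later event
      merge_rule(&staged, &incoming);
      printf("seq=%d size=%zu\n", staged.seq, staged.size);  // seq=9 size=128
      return 0;
    }
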
--- a/hotspot/src/share/vm/services/memSnapshot.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,408 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
-#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
-
-#include "memory/allocation.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "services/memBaseline.hpp"
-#include "services/memPtrArray.hpp"
-
-// Snapshot pointer array iterator
-
-// The pointer array contains malloc-ed pointers
-class MemPointerIterator : public MemPointerArrayIteratorImpl {
- public:
-  MemPointerIterator(MemPointerArray* arr):
-    MemPointerArrayIteratorImpl(arr) {
-    assert(arr != NULL, "null array");
-  }
-
-#ifdef ASSERT
-  virtual bool is_dup_pointer(const MemPointer* ptr1,
-    const MemPointer* ptr2) const {
-    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
-    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;
-
-    if (p1->addr() != p2->addr()) return false;
-    if ((p1->flags() & MemPointerRecord::tag_masks) !=
-        (p2->flags() & MemPointerRecord::tag_masks)) {
-      return false;
-    }
-    // we do see multiple commit/uncommit on the same memory, it is ok
-    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
-           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
-  }
-
-  virtual bool insert(MemPointer* ptr) {
-    if (_pos > 0) {
-      MemPointer* p1 = (MemPointer*)ptr;
-      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
-      assert(!is_dup_pointer(p1, p2),
-        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
-    }
-     if (_pos < _array->length() -1) {
-      MemPointer* p1 = (MemPointer*)ptr;
-      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
-      assert(!is_dup_pointer(p1, p2),
-        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
-     }
-    return _array->insert_at(ptr, _pos);
-  }
-
-  virtual bool insert_after(MemPointer* ptr) {
-    if (_pos > 0) {
-      MemPointer* p1 = (MemPointer*)ptr;
-      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
-      assert(!is_dup_pointer(p1, p2),
-        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
-    }
-    if (_pos < _array->length() - 1) {
-      MemPointer* p1 = (MemPointer*)ptr;
-      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
-
-      assert(!is_dup_pointer(p1, p2),
-        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
-     }
-    if (_array->insert_at(ptr, _pos + 1)) {
-      _pos ++;
-      return true;
-    }
-    return false;
-  }
-#endif
-
-  virtual MemPointer* locate(address addr) {
-    MemPointer* cur = current();
-    while (cur != NULL && cur->addr() < addr) {
-      cur = next();
-    }
-    return cur;
-  }
-};
-
-class VMMemPointerIterator : public MemPointerIterator {
- public:
-  VMMemPointerIterator(MemPointerArray* arr):
-      MemPointerIterator(arr) {
-  }
-
-  // locate an existing reserved memory region that contains specified address,
-  // or the reserved region just above this address, where the incoming
-  // reserved region should be inserted.
-  virtual MemPointer* locate(address addr) {
-    reset();
-    VMMemRegion* reg = (VMMemRegion*)current();
-    while (reg != NULL) {
-      if (reg->is_reserved_region()) {
-        if (reg->contains_address(addr) || addr < reg->base()) {
-          return reg;
-      }
-    }
-      reg = (VMMemRegion*)next();
-    }
-      return NULL;
-    }
-
-  // following methods update virtual memory in the context
-  // of 'current' position, which is properly positioned by
-  // callers via locate method.
-  bool add_reserved_region(MemPointerRecord* rec);
-  bool add_committed_region(MemPointerRecord* rec);
-  bool remove_uncommitted_region(MemPointerRecord* rec);
-  bool remove_released_region(MemPointerRecord* rec);
-
-  // split a reserved region to create a new memory region with specified base and size
-  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
- private:
-  bool insert_record(MemPointerRecord* rec);
-  bool insert_record_after(MemPointerRecord* rec);
-
-  bool insert_reserved_region(MemPointerRecord* rec);
-
-  // reset current position
-  inline void reset() { _pos = 0; }
-#ifdef ASSERT
-  // check integrity of records on current reserved memory region.
-  bool check_reserved_region() {
-    VMMemRegion* reserved_region = (VMMemRegion*)current();
-    assert(reserved_region != NULL && reserved_region->is_reserved_region(),
-          "Sanity check");
-    // all committed regions that follow current reserved region, should all
-    // belong to the reserved region.
-    VMMemRegion* next_region = (VMMemRegion*)next();
-    for (; next_region != NULL && next_region->is_committed_region();
-         next_region = (VMMemRegion*)next() ) {
-      if(!reserved_region->contains_region(next_region)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  virtual bool is_dup_pointer(const MemPointer* ptr1,
-    const MemPointer* ptr2) const {
-    VMMemRegion* p1 = (VMMemRegion*)ptr1;
-    VMMemRegion* p2 = (VMMemRegion*)ptr2;
-
-    if (p1->addr() != p2->addr()) return false;
-    if ((p1->flags() & MemPointerRecord::tag_masks) !=
-        (p2->flags() & MemPointerRecord::tag_masks)) {
-      return false;
-    }
-    // we do see multiple commit/uncommit on the same memory, it is ok
-    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
-           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
-  }
-#endif
-};
-
-class MallocRecordIterator : public MemPointerArrayIterator {
- private:
-  MemPointerArrayIteratorImpl  _itr;
-
-
-
- public:
-  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
-  }
-
-  virtual MemPointer* current() const {
-#ifdef ASSERT
-    MemPointer* cur_rec = _itr.current();
-    if (cur_rec != NULL) {
-      MemPointer* prev_rec = _itr.peek_prev();
-      MemPointer* next_rec = _itr.peek_next();
-      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
-      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
-    }
-#endif
-    return _itr.current();
-  }
-  virtual MemPointer* next() {
-    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
-    // arena memory record is a special case, which we have to compare
-    // sequence number against its associated arena record.
-    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
-      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
-      // if there is an associated arena record, it has to be previous
-      // record because of sorting order (by address) - NMT generates a pseudo address
-      // for arena's size record by offsetting arena's address, that guarantees
-      // the order of arena record and it's size record.
-      if (prev_rec != NULL && prev_rec->is_arena_record() &&
-        next_rec->is_memory_record_of_arena(prev_rec)) {
-        if (prev_rec->seq() > next_rec->seq()) {
-          // Skip this arena memory record
-          // Two scenarios:
-          //   - if the arena record is an allocation record, this early
-          //     size record must be leftover by previous arena,
-          //     and the last size record should have size = 0.
-          //   - if the arena record is a deallocation record, this
-          //     size record should be its cleanup record, which should
-          //     also have size = 0. In other world, arena alway reset
-          //     its size before gone (see Arena's destructor)
-          assert(next_rec->size() == 0, "size not reset");
-          return _itr.next();
-        } else {
-          assert(prev_rec->is_allocation_record(),
-            "Arena size record ahead of allocation record");
-        }
-      }
-    }
-    return next_rec;
-  }
-
-  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
-  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
-  void remove()                      { ShouldNotReachHere(); }
-  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
-  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
-};
-
-// collapse duplicated records. Eliminating duplicated records here, is much
-// cheaper than during promotion phase. However, it does have limitation - it
-// can only eliminate duplicated records within the generation, there are
-// still chances seeing duplicated records during promotion.
-// We want to use the record with higher sequence number, because it has
-// more accurate callsite pc.
-class VMRecordIterator : public MemPointerArrayIterator {
- private:
-  MemPointerArrayIteratorImpl  _itr;
-
- public:
-  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
-    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
-    while (next != NULL) {
-      assert(cur != NULL, "Sanity check");
-      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
-        "pre-sort order");
-
-      if (is_duplicated_record(cur, next)) {
-        _itr.next();
-        next = (MemPointerRecord*)_itr.peek_next();
-      } else {
-        break;
-      }
-    }
-  }
-
-  virtual MemPointer* current() const {
-    return _itr.current();
-  }
-
-  // get next record, but skip the duplicated records
-  virtual MemPointer* next() {
-    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
-    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
-    while (next != NULL) {
-      assert(cur != NULL, "Sanity check");
-      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
-        "pre-sort order");
-
-      if (is_duplicated_record(cur, next)) {
-        _itr.next();
-        cur = next;
-        next = (MemPointerRecord*)_itr.peek_next();
-      } else {
-        break;
-      }
-    }
-    return cur;
-  }
-
-  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
-  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
-  void remove()                      { ShouldNotReachHere(); }
-  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
-  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
-
- private:
-  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
-    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
-    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
-    return ret;
-  }
-};
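
A minimal standalone sketch of the duplicate-collapsing idea above, using plain C++ stand-ins rather than the real MemPointerRecord/iterator types (all names here are hypothetical): records arrive pre-sorted by address and, within an address, by ascending sequence number, so keeping the last record of each equal (addr, size) run keeps the one with the most accurate callsite pc.

#include <cstddef>
#include <cstdint>
#include <vector>

struct Rec { uintptr_t addr; std::size_t size; unsigned seq; };

// Keep only the highest-seq record of each run of duplicates.
static std::vector<Rec> collapse_duplicates(const std::vector<Rec>& in) {
  std::vector<Rec> out;
  for (const Rec& r : in) {
    if (!out.empty() && out.back().addr == r.addr && out.back().size == r.size) {
      out.back() = r;   // same allocation seen again: the higher seq wins
    } else {
      out.push_back(r);
    }
  }
  return out;
}
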
-
-class StagingArea VALUE_OBJ_CLASS_SPEC {
- private:
-  MemPointerArray*   _malloc_data;
-  MemPointerArray*   _vm_data;
-
- public:
-  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
-    init();
-  }
-
-  ~StagingArea() {
-    if (_malloc_data != NULL) delete _malloc_data;
-    if (_vm_data != NULL) delete _vm_data;
-  }
-
-  MallocRecordIterator malloc_record_walker() {
-    return MallocRecordIterator(malloc_data());
-  }
-
-  VMRecordIterator virtual_memory_record_walker();
-
-  bool init();
-  void clear() {
-    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
-    _malloc_data->shrink();
-    _malloc_data->clear();
-    _vm_data->clear();
-  }
-
-  inline MemPointerArray* malloc_data() { return _malloc_data; }
-  inline MemPointerArray* vm_data()     { return _vm_data; }
-};
-
-class MemBaseline;
-class MemSnapshot : public CHeapObj<mtNMT> {
- private:
-  // the following two arrays contain records of all known live memory blocks
-  // live malloc-ed memory pointers
-  MemPointerArray*      _alloc_ptrs;
-  // live virtual memory pointers
-  MemPointerArray*      _vm_ptrs;
-
-  StagingArea           _staging_area;
-
-  // the lock to protect this snapshot
-  Monitor*              _lock;
-
-  // the number of instance classes
-  int                   _number_of_classes;
-
-  NOT_PRODUCT(size_t    _untracked_count;)
-  friend class MemBaseline;
-
- public:
-  MemSnapshot();
-  virtual ~MemSnapshot();
-
-  // if we are running out of native memory
-  bool out_of_memory() {
-    return (_alloc_ptrs == NULL ||
-      _staging_area.malloc_data() == NULL ||
-      _staging_area.vm_data() == NULL ||
-      _vm_ptrs == NULL || _lock == NULL ||
-      _alloc_ptrs->out_of_memory() ||
-      _vm_ptrs->out_of_memory());
-  }
-
-  // merge a per-thread memory recorder into the staging area
-  bool merge(MemRecorder* rec);
-  // promote staged data to the snapshot (modeled in the sketch after this header)
-  bool promote(int number_of_classes);
-
-  int  number_of_classes() const { return _number_of_classes; }
-
-  void wait(long timeout) {
-    assert(_lock != NULL, "Just check");
-    MonitorLockerEx locker(_lock);
-    locker.wait(true, timeout);
-  }
-
-  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
-  NOT_PRODUCT(void check_staging_data();)
-  NOT_PRODUCT(void check_malloc_pointers();)
-  NOT_PRODUCT(bool has_allocation_record(address addr);)
-  // dump all virtual memory pointers in snapshot
-  DEBUG_ONLY( void dump_all_vm_pointers();)
-
- private:
-   // copy sequenced pointer from src to dest
-   void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
-   // assign a sequenced pointer to non-sequenced pointer
-   void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
-
-   bool promote_malloc_records(MemPointerArrayIterator* itr);
-   bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
-};
-
-#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
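
A rough standalone model of the merge/promote split that MemSnapshot implements above (all names hypothetical; this is a sketch of the two-stage design, not the HotSpot code): per-thread recorder batches are first merged into a cheap staging buffer, and a separate promote() step later folds the staged records into the authoritative snapshot.

#include <vector>

struct Event { void* addr; long size_delta; };

class SnapshotModel {
  std::vector<Event> _staging;     // StagingArea stand-in: cheap appends
  long               _live_bytes;  // stand-in for the promoted live data
 public:
  SnapshotModel() : _live_bytes(0) { }

  // step 1: merge one recorder's batch into the staging area
  void merge(const std::vector<Event>& recorder) {
    _staging.insert(_staging.end(), recorder.begin(), recorder.end());
  }

  // step 2: promote staged data into the snapshot, then clear staging
  void promote() {
    for (const Event& e : _staging) _live_bytes += e.size_delta;
    _staging.clear();
  }

  long live_bytes() const { return _live_bytes; }
};
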
--- a/hotspot/src/share/vm/services/memTrackWorker.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/threadCritical.hpp"
-#include "services/memTracker.hpp"
-#include "services/memTrackWorker.hpp"
-#include "utilities/decoder.hpp"
-#include "utilities/vmError.hpp"
-
-
-void GenerationData::reset() {
-  _number_of_classes = 0;
-  while (_recorder_list != NULL) {
-    MemRecorder* tmp = _recorder_list;
-    _recorder_list = _recorder_list->next();
-    MemTracker::release_thread_recorder(tmp);
-  }
-}
-
-MemTrackWorker::MemTrackWorker(MemSnapshot* snapshot): _snapshot(snapshot) {
-  // The created thread uses the cgc thread type for now. We should revisit
-  // this choice, or create a new thread type.
-  _has_error = !os::create_thread(this, os::cgc_thread);
-  set_name("MemTrackWorker");
-
-  // initialize the generation circular buffer
-  if (!has_error()) {
-    _head = _tail = 0;
-    for(int index = 0; index < MAX_GENERATIONS; index ++) {
-      ::new ((void*)&_gen[index]) GenerationData();
-    }
-  }
-  NOT_PRODUCT(_sync_point_count = 0;)
-  NOT_PRODUCT(_merge_count = 0;)
-  NOT_PRODUCT(_last_gen_in_use = 0;)
-}
-
-MemTrackWorker::~MemTrackWorker() {
-  for (int index = 0; index < MAX_GENERATIONS; index ++) {
-    _gen[index].reset();
-  }
-}
-
-void* MemTrackWorker::operator new(size_t size) throw() {
-  assert(false, "use nothrow version");
-  return NULL;
-}
-
-void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
-  return allocate(size, false, mtNMT);
-}
-
-void MemTrackWorker::start() {
-  os::start_thread(this);
-}
-
-/*
- * Native memory tracking worker thread loop:
- *   1. merge one generation of memory recorders to staging area
- *   2. promote staging data to memory snapshot
- *
- * This thread can run through safepoint.
- */
-
-void MemTrackWorker::run() {
-  assert(MemTracker::is_on(), "native memory tracking is off");
-  this->initialize_thread_local_storage();
-  this->record_stack_base_and_size();
-  assert(_snapshot != NULL, "Worker should not be started");
-  MemRecorder* rec;
-  unsigned long processing_generation = 0;
-  bool          worker_idle = false;
-
-  while (!MemTracker::shutdown_in_progress()) {
-    NOT_PRODUCT(_last_gen_in_use = generations_in_use();)
-    {
-      // take a recorder from earliest generation in buffer
-      ThreadCritical tc;
-      rec = _gen[_head].next_recorder();
-    }
-    if (rec != NULL) {
-      if (rec->get_generation() != processing_generation || worker_idle) {
-        processing_generation = rec->get_generation();
-        worker_idle = false;
-        MemTracker::set_current_processing_generation(processing_generation);
-      }
-
-      // merge the recorder into staging area
-      if (!_snapshot->merge(rec)) {
-        MemTracker::shutdown(MemTracker::NMT_out_of_memory);
-      } else {
-        NOT_PRODUCT(_merge_count ++;)
-      }
-      MemTracker::release_thread_recorder(rec);
-    } else {
-      // no more recorders to merge; promote the staging area
-      // to the snapshot
-      if (_head != _tail) {
-        long number_of_classes;
-        {
-          ThreadCritical tc;
-          if (_gen[_head].has_more_recorder() || _head == _tail) {
-            continue;
-          }
-          number_of_classes = _gen[_head].number_of_classes();
-          _gen[_head].reset();
-
-          // done with this generation, increment _head pointer
-          _head = (_head + 1) % MAX_GENERATIONS;
-        }
-        // promote this generation data to snapshot
-        if (!_snapshot->promote(number_of_classes)) {
-          // failed to promote, means out of memory
-          MemTracker::shutdown(MemTracker::NMT_out_of_memory);
-        }
-      } else {
-        // worker thread is idle
-        worker_idle = true;
-        MemTracker::report_worker_idle();
-        _snapshot->wait(1000);
-        ThreadCritical tc;
-        // check if more data arrived
-        if (!_gen[_head].has_more_recorder()) {
-          _gen[_head].add_recorders(MemTracker::get_pending_recorders());
-        }
-      }
-    }
-  }
-  assert(MemTracker::shutdown_in_progress(), "just check");
-
-  // transition to final shutdown
-  MemTracker::final_shutdown();
-}
-
-// Called at the synchronization point, where 'safepoint visible' Java threads
-// are blocked at a safepoint and the rest of the threads are blocked on the
-// ThreadCritical lock. The caller, MemTracker::sync(), already takes
-// ThreadCritical before calling this method.
-//
-// The following tasks are performed:
-//   1. add all recorders in the pending queue to the current generation
-//   2. advance the generation
-
-void MemTrackWorker::at_sync_point(MemRecorder* rec, int number_of_classes) {
-  NOT_PRODUCT(_sync_point_count ++;)
-  assert(count_recorder(rec) <= MemRecorder::_instance_count,
-    "pending queue has infinite loop");
-
-  bool out_of_generation_buffer = false;
-  // check shutdown state inside ThreadCritical
-  if (MemTracker::shutdown_in_progress()) return;
-
-  _gen[_tail].set_number_of_classes(number_of_classes);
-  // append the recorders to the end of the generation
-  _gen[_tail].add_recorders(rec);
-  assert(count_recorder(_gen[_tail].peek()) <= MemRecorder::_instance_count,
-    "after add to current generation has infinite loop");
-  // we have collected all recorders for this generation. If there is data,
-  // we need to increment _tail to start a new generation.
-  if (_gen[_tail].has_more_recorder()  || _head == _tail) {
-    _tail = (_tail + 1) % MAX_GENERATIONS;
-    out_of_generation_buffer = (_tail == _head);
-  }
-
-  if (out_of_generation_buffer) {
-    MemTracker::shutdown(MemTracker::NMT_out_of_generation);
-  }
-}
-
-#ifndef PRODUCT
-int MemTrackWorker::count_recorder(const MemRecorder* head) {
-  int count = 0;
-  while(head != NULL) {
-    count ++;
-    head = head->next();
-  }
-  return count;
-}
-
-int MemTrackWorker::count_pending_recorders() const {
-  int count = 0;
-  for (int index = 0; index < MAX_GENERATIONS; index ++) {
-    MemRecorder* head = _gen[index].peek();
-    if (head != NULL) {
-      count += count_recorder(head);
-    }
-  }
-  return count;
-}
-#endif
--- a/hotspot/src/share/vm/services/memTrackWorker.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
-#define SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
-
-#include "memory/allocation.hpp"
-#include "runtime/thread.hpp"
-#include "services/memRecorder.hpp"
-
-// At most MAX_GENERATIONS generations of data can be tracked.
-#define MAX_GENERATIONS  512
-
-class GenerationData VALUE_OBJ_CLASS_SPEC {
- private:
-  int           _number_of_classes;
-  MemRecorder*  _recorder_list;
-
- public:
-  GenerationData(): _number_of_classes(0), _recorder_list(NULL) { }
-
-  inline int  number_of_classes() const { return _number_of_classes; }
-  inline void set_number_of_classes(long num) { _number_of_classes = num; }
-
-  inline MemRecorder* next_recorder() {
-    if (_recorder_list == NULL) {
-      return NULL;
-    } else {
-      MemRecorder* tmp = _recorder_list;
-      _recorder_list = _recorder_list->next();
-      return tmp;
-    }
-  }
-
-  inline bool has_more_recorder() const {
-    return (_recorder_list != NULL);
-  }
-
-  // add recorders to this generation
-  void add_recorders(MemRecorder* head) {
-    if (head != NULL) {
-      if (_recorder_list == NULL) {
-        _recorder_list = head;
-      } else {
-        MemRecorder* tmp = _recorder_list;
-        for (; tmp->next() != NULL; tmp = tmp->next());
-        tmp->set_next(head);
-      }
-    }
-  }
-
-  void reset();
-
-  NOT_PRODUCT(MemRecorder* peek() const { return _recorder_list; })
-};
-
-class MemTrackWorker : public NamedThread {
- private:
-  // circular buffer. This buffer contains generation data to be merged into the
-  // global snapshot.
-  // Each slot holds a generation
-  GenerationData  _gen[MAX_GENERATIONS];
-  int             _head, _tail; // head and tail pointers to above circular buffer
-
-  bool            _has_error;
-
-  MemSnapshot*    _snapshot;
-
- public:
-  MemTrackWorker(MemSnapshot* snapshot);
-  ~MemTrackWorker();
-  _NOINLINE_ void* operator new(size_t size) throw();
-  _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw();
-
-  void start();
-  void run();
-
-  inline bool has_error() const { return _has_error; }
-
-  // task at synchronization point
-  void at_sync_point(MemRecorder* pending_recorders, int number_of_classes);
-
-  // for debugging purposes; they are not thread safe.
-  NOT_PRODUCT(static int count_recorder(const MemRecorder* head);)
-  NOT_PRODUCT(int count_pending_recorders() const;)
-
-  NOT_PRODUCT(int _sync_point_count;)
-  NOT_PRODUCT(int _merge_count;)
-  NOT_PRODUCT(int _last_gen_in_use;)
-
-  // how many generations are queued
-  inline int generations_in_use() const {
-    return (_tail >= _head ? (_tail - _head + 1) : (MAX_GENERATIONS - (_head - _tail) + 1));
-  }
-};
-
-#endif // SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
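
The generations_in_use() arithmetic above is easy to get wrong, so here is a tiny standalone check of the same formula (MAX_GENERATIONS and the head/tail convention are copied from the header; everything else is scaffolding):

#include <cassert>

static const int MAX_GENERATIONS = 512;

static int generations_in_use(int head, int tail) {
  return tail >= head ? (tail - head + 1)
                      : (MAX_GENERATIONS - (head - tail) + 1);
}

int main() {
  assert(generations_in_use(0, 0) == 1);     // only the open generation
  assert(generations_in_use(0, 5) == 6);     // no wrap-around
  assert(generations_in_use(510, 1) == 4);   // wrapped: slots 510, 511, 0, 1
  return 0;
}
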
--- a/hotspot/src/share/vm/services/memTracker.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/services/memTracker.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,862 +23,308 @@
  */
 #include "precompiled.hpp"
 
-#include "oops/instanceKlass.hpp"
-#include "runtime/atomic.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/threadCritical.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/vm_operations.hpp"
-#include "services/memPtr.hpp"
+#include "runtime/mutex.hpp"
+#include "services/memBaseline.hpp"
 #include "services/memReporter.hpp"
+#include "services/mallocTracker.inline.hpp"
 #include "services/memTracker.hpp"
-#include "utilities/decoder.hpp"
 #include "utilities/defaultStream.hpp"
-#include "utilities/globalDefinitions.hpp"
 
-bool NMT_track_callsite = false;
+#ifdef SOLARIS
+  volatile bool NMT_stack_walkable = false;
+#else
+  volatile bool NMT_stack_walkable = true;
+#endif
 
-// walk all 'known' threads at NMT sync point, and collect their recorders
-void SyncThreadRecorderClosure::do_thread(Thread* thread) {
-  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
-  if (thread->is_Java_thread()) {
-    JavaThread* javaThread = (JavaThread*)thread;
-    MemRecorder* recorder = javaThread->get_recorder();
-    if (recorder != NULL) {
-      MemTracker::enqueue_pending_recorder(recorder);
-      javaThread->set_recorder(NULL);
-    }
-  }
-  _thread_count ++;
-}
+volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
+NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;
+
+NativeCallStack emptyStack(0, false);
+
+MemBaseline MemTracker::_baseline;
+Mutex*      MemTracker::_query_lock = NULL;
+bool MemTracker::_is_nmt_env_valid = true;
 
 
-MemRecorder* volatile           MemTracker::_global_recorder = NULL;
-MemSnapshot*                    MemTracker::_snapshot = NULL;
-MemBaseline                     MemTracker::_baseline;
-Mutex*                          MemTracker::_query_lock = NULL;
-MemRecorder* volatile           MemTracker::_merge_pending_queue = NULL;
-MemRecorder* volatile           MemTracker::_pooled_recorders = NULL;
-MemTrackWorker*                 MemTracker::_worker_thread = NULL;
-int                             MemTracker::_sync_point_skip_count = 0;
-MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
-volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
-MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
-int                             MemTracker::_thread_count = 255;
-volatile jint                   MemTracker::_pooled_recorder_count = 0;
-volatile unsigned long          MemTracker::_processing_generation = 0;
-volatile bool                   MemTracker::_worker_thread_idle = false;
-volatile jint                   MemTracker::_pending_op_count = 0;
-volatile bool                   MemTracker::_slowdown_calling_thread = false;
-debug_only(intx                 MemTracker::_main_thread_tid = 0;)
-NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)
-
-void MemTracker::init_tracking_options(const char* option_line) {
-  _tracking_level = NMT_off;
-  if (strcmp(option_line, "=summary") == 0) {
-    _tracking_level = NMT_summary;
-  } else if (strcmp(option_line, "=detail") == 0) {
-    // detail relies on a stack-walking ability that may not
-    // be available depending on platform and/or compiler flags
+NMT_TrackingLevel MemTracker::init_tracking_level() {
+  NMT_TrackingLevel level = NMT_off;
+  char buf[64];
+  char nmt_option[64];
+  jio_snprintf(buf, sizeof(buf), "NMT_LEVEL_%d", os::current_process_id());
+  if (os::getenv(buf, nmt_option, sizeof(nmt_option))) {
+    if (strcmp(nmt_option, "summary") == 0) {
+      level = NMT_summary;
+    } else if (strcmp(nmt_option, "detail") == 0) {
 #if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
-      _tracking_level = NMT_detail;
+      level = NMT_detail;
 #else
-      jio_fprintf(defaultStream::error_stream(),
-        "NMT detail is not supported on this platform.  Using NMT summary instead.\n");
-      _tracking_level = NMT_summary;
-#endif
-  } else if (strcmp(option_line, "=off") != 0) {
-    vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
-  }
-}
-
-// first phase of bootstrapping, when VM is still in single-threaded mode.
-void MemTracker::bootstrap_single_thread() {
-  if (_tracking_level > NMT_off) {
-    assert(_state == NMT_uninited, "wrong state");
-
-    // NMT is not supported when UseMallocOnly is on. NMT can NOT
-    // handle the amount of malloc data without significantly impacting
-    // runtime performance when this flag is on.
-    if (UseMallocOnly) {
-      shutdown(NMT_use_malloc_only);
-      return;
+      level = NMT_summary;
+#endif // PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+    } else if (strcmp(nmt_option, "off") != 0) {
+      // The option value is invalid
+      _is_nmt_env_valid = false;
     }
 
-    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
-    if (_query_lock == NULL) {
-      shutdown(NMT_out_of_memory);
-      return;
-    }
+    // Remove the environment variable to avoid leaking to child processes
+    os::unsetenv(buf);
+  }
 
-    debug_only(_main_thread_tid = os::current_thread_id();)
-    _state = NMT_bootstrapping_single_thread;
-    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
+  if (!MallocTracker::initialize(level) ||
+      !VirtualMemoryTracker::initialize(level)) {
+    level = NMT_off;
   }
+  return level;
 }
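
For context, init_tracking_level() above consumes a per-process environment variable named NMT_LEVEL_<pid> and then unsets it. Below is a minimal sketch of the producer side of that handshake, assuming a POSIX environment (the producer is presumably the JDK launcher, which is not part of this change; the function below is purely illustrative):

#include <cstdio>
#include <cstdlib>
#include <unistd.h>

// Publish the NMT level for the current process before the VM initializes.
// 'level' is expected to be "summary", "detail", or "off".
static void publish_nmt_level(const char* level) {
  char name[64];
  snprintf(name, sizeof(name), "NMT_LEVEL_%d", (int)getpid());
  setenv(name, level, 1 /* overwrite */);
}
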
 
-// second phase of bootstrapping, when the VM is about to enter, or has already entered, multi-threaded mode.
-void MemTracker::bootstrap_multi_thread() {
-  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
-    // create the NMT lock for multi-threaded execution
-    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
-    _state = NMT_bootstrapping_multi_thread;
-    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
-  }
-}
-
-// fully start nmt
-void MemTracker::start() {
-  // Native memory tracking is off from command line option
-  if (_tracking_level == NMT_off || shutdown_in_progress()) return;
-
-  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
-  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");
-
-  _snapshot = new (std::nothrow)MemSnapshot();
-  if (_snapshot != NULL) {
-    if (!_snapshot->out_of_memory() && start_worker(_snapshot)) {
-      _state = NMT_started;
-      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
-      return;
-    }
-
-    delete _snapshot;
-    _snapshot = NULL;
-  }
-
-  // failed to start native memory tracking, shut it down
-  shutdown(NMT_initialization);
-}
-
-/**
- * Shutting down native memory tracking.
- * We cannot shut down native memory tracking immediately, so we just
- * set the shutdown-pending flag; every native memory tracking component
- * should shut itself down in an orderly fashion.
- *
- * The shutdown sequence:
- *  1. MemTracker::shutdown() sets MemTracker to the shutdown-pending state
- *  2. The worker thread calls MemTracker::final_shutdown(), which transitions
- *     MemTracker to the final shutdown state.
- *  3. At a sync point, MemTracker does final cleanup, then sets the memory
- *     tracking level to off to complete the shutdown.
- */
-void MemTracker::shutdown(ShutdownReason reason) {
-  if (_tracking_level == NMT_off) return;
-
-  if (_state <= NMT_bootstrapping_single_thread) {
-    // we are still in single-threaded mode, so there is no contention
-    _state = NMT_shutdown_pending;
-    _reason = reason;
-  } else {
-    // we want to know who initiated the shutdown
-    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
-                                       (jint*)&_state, (jint)NMT_started)) {
-        _reason = reason;
-    }
-  }
-}
-
-// final phase of shutdown
-void MemTracker::final_shutdown() {
-  // delete all pending recorders and pooled recorders
-  delete_all_pending_recorders();
-  delete_all_pooled_recorders();
-
-  {
-    // shared baseline and snapshot are the only objects needed to
-    // create query results
-    MutexLockerEx locker(_query_lock, true);
-    // cleanup baseline data and snapshot
-    _baseline.clear();
-    delete _snapshot;
-    _snapshot = NULL;
-  }
-
-  // shutdown shared decoder instance, since it is only
-  // used by native memory tracking so far.
-  Decoder::shutdown();
-
-  MemTrackWorker* worker = NULL;
-  {
-    ThreadCritical tc;
-    // can not delete worker inside the thread critical
-    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
-      worker = _worker_thread;
-      _worker_thread = NULL;
-    }
-  }
-  if (worker != NULL) {
-    delete worker;
-  }
-  _state = NMT_final_shutdown;
-}
-
-// delete all pooled recorders
-void MemTracker::delete_all_pooled_recorders() {
-  // free all pooled recorders
-  MemRecorder* volatile cur_head = _pooled_recorders;
-  if (cur_head != NULL) {
-    MemRecorder* null_ptr = NULL;
-    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
-      (void*)&_pooled_recorders, (void*)cur_head)) {
-      cur_head = _pooled_recorders;
-    }
-    if (cur_head != NULL) {
-      delete cur_head;
-      _pooled_recorder_count = 0;
+void MemTracker::init() {
+  if (tracking_level() >= NMT_summary) {
+    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
+    // Already OOM. It is unlikely, but we still have to handle it.
+    if (_query_lock == NULL) {
+      shutdown();
     }
   }
 }
 
-// delete all recorders in pending queue
-void MemTracker::delete_all_pending_recorders() {
-  // free all pending recorders
-  MemRecorder* pending_head = get_pending_recorders();
-  if (pending_head != NULL) {
-    delete pending_head;
+bool MemTracker::check_launcher_nmt_support(const char* value) {
+  if (strcmp(value, "=detail") == 0) {
+#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+    jio_fprintf(defaultStream::error_stream(),
+      "NMT detail is not supported on this platform.  Using NMT summary instead.\n");
+    if (MemTracker::tracking_level() != NMT_summary) {
+      return false;
+    }
+#else
+    if (MemTracker::tracking_level() != NMT_detail) {
+      return false;
+    }
+#endif
+  } else if (strcmp(value, "=summary") == 0) {
+    if (MemTracker::tracking_level() != NMT_summary) {
+      return false;
+    }
+  } else if (strcmp(value, "=off") == 0) {
+    if (MemTracker::tracking_level() != NMT_off) {
+      return false;
+    }
+  } else {
+    _is_nmt_env_valid = false;
+  }
+
+  return true;
+}
+
+bool MemTracker::verify_nmt_option() {
+  return _is_nmt_env_valid;
+}
+
+void* MemTracker::malloc_base(void* memblock) {
+  return MallocTracker::get_base(memblock);
+}
+
+void Tracker::record(address addr, size_t size) {
+  if (MemTracker::tracking_level() < NMT_summary) return;
+  switch(_type) {
+    case uncommit:
+      VirtualMemoryTracker::remove_uncommitted_region(addr, size);
+      break;
+    case release:
+      VirtualMemoryTracker::remove_released_region(addr, size);
+      break;
+    default:
+      ShouldNotReachHere();
   }
 }
 
-/*
- * retrieve the per-thread recorder of the specified thread.
- * if thread == NULL, it means the global recorder
- */
-MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
-  if (shutdown_in_progress()) return NULL;
+
+// Shutdown can only be issued via JCmd, and NMT JCmd requests are
+// serialized by a lock
+void MemTracker::shutdown() {
+  // We can only shut down NMT to the minimal tracking level if it was
+  // ever on.
+  if (tracking_level() > NMT_minimal) {
+    transition_to(NMT_minimal);
+  }
+}
+
+bool MemTracker::transition_to(NMT_TrackingLevel level) {
+  NMT_TrackingLevel current_level = tracking_level();
 
-  MemRecorder* rc;
-  if (thread == NULL) {
-    rc = _global_recorder;
+  if (current_level == level) {
+    return true;
+  } else if (current_level > level) {
+    // Downgrading the tracking level; we want to lower the tracking
+    // level first
+    _tracking_level = level;
+    // Make _tracking_level visible immediately.
+    OrderAccess::fence();
+    VirtualMemoryTracker::transition(current_level, level);
+    MallocTracker::transition(current_level, level);
+
+    if (level == NMT_minimal) _baseline.reset();
   } else {
-    rc = thread->get_recorder();
-  }
+    VirtualMemoryTracker::transition(current_level, level);
+    MallocTracker::transition(current_level, level);
 
-  if (rc != NULL && rc->is_full()) {
-    enqueue_pending_recorder(rc);
-    rc = NULL;
+    _tracking_level = level;
+    // Make _tracking_level visible immediately.
+    OrderAccess::fence();
   }
 
-  if (rc == NULL) {
-    rc = get_new_or_pooled_instance();
-    if (thread == NULL) {
-      _global_recorder = rc;
-    } else {
-      thread->set_recorder(rc);
-    }
-  }
-  return rc;
+  return true;
 }
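
A standalone sketch of the ordering rule transition_to() follows, with std::atomic standing in for MemTracker::_tracking_level and a placeholder for the subsystem transitions (names hypothetical): on a downgrade the new, lower level is published first so callers stop producing higher-level data, while on an upgrade the trackers are reconfigured first so they are ready before the new level is announced.

#include <atomic>

enum Level { OFF, MINIMAL, SUMMARY, DETAIL };

static std::atomic<Level> g_level(SUMMARY);

static void reconfigure_trackers(Level /*from*/, Level /*to*/) {
  // stand-in for the VirtualMemoryTracker/MallocTracker transitions
}

static void transition_to(Level level) {
  Level current = g_level.load();
  if (current == level) return;
  if (current > level) {            // downgrade: stop producing data first
    g_level.store(level);           // publish immediately (like the fence)
    reconfigure_trackers(current, level);
  } else {                          // upgrade: be ready before announcing
    reconfigure_trackers(current, level);
    g_level.store(level);
  }
}
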
 
-/*
- * get a per-thread recorder from the pool, or create a new one if
- * none is available.
- */
-MemRecorder* MemTracker::get_new_or_pooled_instance() {
-   MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
-   if (cur_head == NULL) {
-     MemRecorder* rec = new (std::nothrow)MemRecorder();
-     if (rec == NULL || rec->out_of_memory()) {
-       shutdown(NMT_out_of_memory);
-       if (rec != NULL) {
-         delete rec;
-         rec = NULL;
-       }
-     }
-     return rec;
-   } else {
-     MemRecorder* next_head = cur_head->next();
-     if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
-       (void*)cur_head)) {
-       return get_new_or_pooled_instance();
-     }
-     cur_head->set_next(NULL);
-     Atomic::dec(&_pooled_recorder_count);
-     cur_head->set_generation();
-     return cur_head;
+void MemTracker::final_report(outputStream* output) {
+  assert(output != NULL, "No output stream");
+  if (tracking_level() >= NMT_summary) {
+    MallocMemorySnapshot* malloc_memory_snapshot =
+      MallocMemorySummary::as_snapshot();
+    malloc_memory_snapshot->make_adjustment();
+
+    VirtualMemorySnapshot* virtual_memory_snapshot =
+      VirtualMemorySummary::as_snapshot();
+
+    MemSummaryReporter rptr(malloc_memory_snapshot,
+      virtual_memory_snapshot, output);
+    rptr.report();
+    // shut down NMT; the data is no longer accurate
+    shutdown();
   }
 }
 
-/*
- * retrieve all recorders in pending queue, and empty the queue
- */
-MemRecorder* MemTracker::get_pending_recorders() {
-  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
-  MemRecorder* null_ptr = NULL;
-  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
-    (void*)cur_head)) {
-    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
-  }
-  NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
-  return cur_head;
-}
+// This is a walker that gathers malloc site hashtable statistics;
+// the results are used for tuning.
+class StatisticsWalker : public MallocSiteWalker {
+ private:
+  enum Threshold {
+    // aggregates statistics over this threshold into one
+    // line item.
+    report_threshold = 20
+  };
 
-/*
- * release a recorder to the recorder pool.
- */
-void MemTracker::release_thread_recorder(MemRecorder* rec) {
-  assert(rec != NULL, "null recorder");
-  // we don't want to pool too many recorders
-  rec->set_next(NULL);
-  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
-    delete rec;
-    return;
-  }
+ private:
+  // Number of allocation sites that have all memory freed
+  int   _empty_entries;
+  // Total number of allocation sites, including empty sites
+  int   _total_entries;
+  // Distribution of captured call stack depths
+  int   _stack_depth_distribution[NMT_TrackingStackDepth];
+  // Hash distribution
+  int   _hash_distribution[report_threshold];
+  // Number of hash buckets that have entries over the threshold
+  int   _bucket_over_threshold;
 
-  rec->clear();
-  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
-  rec->set_next(cur_head);
-  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
-    (void*)cur_head)) {
-    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
-    rec->set_next(cur_head);
-  }
-  Atomic::inc(&_pooled_recorder_count);
-}
-
-// write a record to the proper recorder. No lock may be taken from this
-// method on down.
-void MemTracker::write_tracking_record(address addr, MEMFLAGS flags,
-    size_t size, jint seq, address pc, JavaThread* thread) {
+  // The hash bucket that walker is currently walking
+  int   _current_hash_bucket;
+  // The length of current hash bucket
+  int   _current_bucket_length;
+  // Number of hash buckets that are not empty
+  int   _used_buckets;
+  // Longest hash bucket length
+  int   _longest_bucket_length;
 
-    MemRecorder* rc = get_thread_recorder(thread);
-    if (rc != NULL) {
-      rc->record(addr, flags, size, seq, pc);
+ public:
+  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
+    int index = 0;
+    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
+      _stack_depth_distribution[index] = 0;
     }
-}
-
-/**
- * enqueue a recorder to the pending queue
- */
-void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
-  assert(rec != NULL, "null recorder");
-
-  // we are shutting down, so just delete it
-  if (shutdown_in_progress()) {
-    rec->set_next(NULL);
-    delete rec;
-    return;
+    for (index = 0; index < report_threshold; index ++) {
+      _hash_distribution[index] = 0;
+    }
+    _bucket_over_threshold = 0;
+    _longest_bucket_length = 0;
+    _current_hash_bucket = -1;
+    _current_bucket_length = 0;
+    _used_buckets = 0;
   }
 
-  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
-  rec->set_next(cur_head);
-  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
-    (void*)cur_head)) {
-    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
-    rec->set_next(cur_head);
+  virtual bool do_malloc_site(const MallocSite* e) {
+    if (e->size() == 0) _empty_entries ++;
+    _total_entries ++;
+
+    // stack depth distribution
+    int frames = e->call_stack()->frames();
+    _stack_depth_distribution[frames - 1] ++;
+
+    // hash distribution
+    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
+    if (_current_hash_bucket == -1) {
+      _current_hash_bucket = hash_bucket;
+      _current_bucket_length = 1;
+    } else if (_current_hash_bucket == hash_bucket) {
+      _current_bucket_length ++;
+    } else {
+      record_bucket_length(_current_bucket_length);
+      _current_hash_bucket = hash_bucket;
+      _current_bucket_length = 1;
+    }
+    return true;
   }
-  NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);)
-}
 
-/*
- * This method is called at a global safepoint
- * during its synchronization process.
- *   1. enqueue all JavaThreads' per-thread recorders
- *   2. enqueue global recorder
- *   3. retrieve all pending recorders
- *   4. reset global sequence number generator
- *   5. call worker's sync
- */
-#define MAX_SAFEPOINTS_TO_SKIP     128
-#define SAFE_SEQUENCE_THRESHOLD    30
-#define HIGH_GENERATION_THRESHOLD  60
-#define MAX_RECORDER_THREAD_RATIO  30
-#define MAX_RECORDER_PER_THREAD    100
-
-void MemTracker::sync() {
-  assert(_tracking_level > NMT_off, "NMT is not enabled");
-  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
+  // walk completed
+  void completed() {
+    record_bucket_length(_current_bucket_length);
+  }
 
-  // Some GC tests hit a large number of safepoints in a short period of time
-  // without meaningful activity. We should avoid going to the
-  // sync point in these cases, since it can potentially exhaust the generation buffer.
-  // These are the factors that determine whether we should go into the sync point:
-  // 1. do not overflow the sequence number
-  // 2. whether we are in danger of overflowing the generation buffer
-  // 3. how many safepoints have already skipped the sync point
-  if (_state == NMT_started) {
-    // worker thread is not ready, no one can manage generation
-    // buffer, so skip this safepoint
-    if (_worker_thread == NULL) return;
-
-    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
-      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
-      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
-      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
-        _sync_point_skip_count ++;
-        return;
+  void report_statistics(outputStream* out) {
+    int index;
+    out->print_cr("Malloc allocation site table:");
+    out->print_cr("\tTotal entries: %d", _total_entries);
+    out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries);
+    out->print_cr(" ");
+    out->print_cr("Hash distribution:");
+    if (_used_buckets < MallocSiteTable::hash_buckets()) {
+      out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets));
+    }
+    for (index = 0; index < report_threshold; index ++) {
+      if (_hash_distribution[index] != 0) {
+        if (index == 0) {
+          out->print_cr("  %d    entry: %d", 1, _hash_distribution[0]);
+        } else if (index < 9) { // single digit
+          out->print_cr("  %d  entries: %d", (index + 1), _hash_distribution[index]);
+        } else {
+          out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]);
+        }
       }
     }
-    {
-      // This method runs at a safepoint, with the ThreadCritical lock held,
-      // which should guarantee that NMT is fully synced.
-      ThreadCritical tc;
-
-      // We can NOT execute NMT sync-point if there are pending tracking ops.
-      if (_pending_op_count == 0) {
-        SequenceGenerator::reset();
-        _sync_point_skip_count = 0;
-
-        // walk all JavaThreads to collect recorders
-        SyncThreadRecorderClosure stc;
-        Threads::threads_do(&stc);
-
-        _thread_count = stc.get_thread_count();
-        MemRecorder* pending_recorders = get_pending_recorders();
-
-        if (_global_recorder != NULL) {
-          _global_recorder->set_next(pending_recorders);
-          pending_recorders = _global_recorder;
-          _global_recorder = NULL;
-        }
-
-        // see if NMT has too many outstanding recorder instances; it usually
-        // means that the worker thread is lagging behind in processing them.
-        if (!AutoShutdownNMT) {
-          _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
-        } else {
-          // If auto shutdown is on, enforce MAX_RECORDER_PER_THREAD threshold to prevent OOM
-          if (MemRecorder::_instance_count >= _thread_count * MAX_RECORDER_PER_THREAD) {
-            shutdown(NMT_out_of_memory);
-          }
-        }
-
-        // check _worker_thread with lock to avoid racing condition
-        if (_worker_thread != NULL) {
-          _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
-        }
-        assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
-      } else {
-        _sync_point_skip_count ++;
+    if (_bucket_over_threshold > 0) {
+      out->print_cr(" >%d entries: %d", report_threshold,  _bucket_over_threshold);
+    }
+    out->print_cr("most entries: %d", _longest_bucket_length);
+    out->print_cr(" ");
+    out->print_cr("Call stack depth distribution:");
+    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
+      if (_stack_depth_distribution[index] > 0) {
+        out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]);
       }
     }
   }
 
-  // now, it is the time to shut whole things off
-  if (_state == NMT_final_shutdown) {
-    // walk all JavaThreads to delete all recorders
-    SyncThreadRecorderClosure stc;
-    Threads::threads_do(&stc);
-    // delete global recorder
-    {
-      ThreadCritical tc;
-      if (_global_recorder != NULL) {
-        delete _global_recorder;
-        _global_recorder = NULL;
-      }
-    }
-    MemRecorder* pending_recorders = get_pending_recorders();
-    if (pending_recorders != NULL) {
-      delete pending_recorders;
-    }
-    // try at a later sync point to ensure the MemRecorder instance count drops to zero,
-    // to completely shut down NMT
-    if (MemRecorder::_instance_count == 0) {
-      _state = NMT_shutdown;
-      _tracking_level = NMT_off;
+ private:
+  void record_bucket_length(int length) {
+    if (length == 0) return;  // empty table: no bucket was walked
+    _used_buckets ++;
+    if (length <= report_threshold) {
+      _hash_distribution[length - 1] ++;
+    } else {
+      _bucket_over_threshold ++;
     }
-  }
-}
-
-/*
- * Start worker thread.
- */
-bool MemTracker::start_worker(MemSnapshot* snapshot) {
-  assert(_worker_thread == NULL && _snapshot != NULL, "Just Check");
-  _worker_thread = new (std::nothrow) MemTrackWorker(snapshot);
-  if (_worker_thread == NULL) {
-    return false;
-  } else if (_worker_thread->has_error()) {
-    delete _worker_thread;
-    _worker_thread = NULL;
-    return false;
-  }
-  _worker_thread->start();
-  return true;
-}
-
-/*
- * We need to collect a JavaThread's per-thread recorder
- * before it exits.
- */
-void MemTracker::thread_exiting(JavaThread* thread) {
-  if (is_on()) {
-    MemRecorder* rec = thread->get_recorder();
-    if (rec != NULL) {
-      enqueue_pending_recorder(rec);
-      thread->set_recorder(NULL);
-    }
-  }
-}
-
-// baseline current memory snapshot
-bool MemTracker::baseline() {
-  MutexLocker lock(_query_lock);
-  MemSnapshot* snapshot = get_snapshot();
-  if (snapshot != NULL) {
-    return _baseline.baseline(*snapshot, false);
-  }
-  return false;
-}
-
-// print memory usage from current snapshot
-bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
-  MemBaseline  baseline;
-  MutexLocker  lock(_query_lock);
-  MemSnapshot* snapshot = get_snapshot();
-  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
-    BaselineReporter reporter(out, unit);
-    reporter.report_baseline(baseline, summary_only);
-    return true;
+    _longest_bucket_length = MAX2(_longest_bucket_length, length);
   }
-  return false;
-}
-
-// Whitebox API for blocking until the current generation of NMT data has been merged
-bool MemTracker::wbtest_wait_for_data_merge() {
-  // NMT can't be shut down while we're holding _query_lock
-  MutexLocker lock(_query_lock);
-  assert(_worker_thread != NULL, "Invalid query");
-  // the generation at query time, so NMT will spin till this generation is processed
-  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
-  unsigned long current_processing_generation = _processing_generation;
-  // whether the generation counter has overflowed
-  bool generation_overflown = (generation_at_query_time < current_processing_generation);
-  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
-  // spin
-  while (!shutdown_in_progress()) {
-    if (!generation_overflown) {
-      if (current_processing_generation > generation_at_query_time) {
-        return true;
-      }
-    } else {
-      assert(generations_to_wrap >= 0, "Sanity check");
-      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
-      assert(current_generations_to_wrap >= 0, "Sanity check");
-      // overflowing an unsigned long should take a long time, so the to_wrap check should be sufficient
-      if (current_generations_to_wrap > generations_to_wrap &&
-          current_processing_generation > generation_at_query_time) {
-        return true;
-      }
-    }
-
-    // if the worker thread is idle but the generation is not advancing, that means
-    // there is no safepoint to let NMT advance the generation, so force one.
-    if (_worker_thread_idle) {
-      VM_ForceSafepoint vfs;
-      VMThread::execute(&vfs);
-    }
-    MemSnapshot* snapshot = get_snapshot();
-    if (snapshot == NULL) {
-      return false;
-    }
-    snapshot->wait(1000);
-    current_processing_generation = _processing_generation;
-  }
-  // We end up here if NMT is shutting down before our data has been merged
-  return false;
-}
-
-// compare memory usage between current snapshot and baseline
-bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
-  MutexLocker lock(_query_lock);
-  if (_baseline.baselined()) {
-    MemBaseline baseline;
-    MemSnapshot* snapshot = get_snapshot();
-    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
-      BaselineReporter reporter(out, unit);
-      reporter.diff_baselines(baseline, _baseline, summary_only);
-      return true;
-    }
-  }
-  return false;
-}
-
-#ifndef PRODUCT
-void MemTracker::walk_stack(int toSkip, char* buf, int len) {
-  int cur_len = 0;
-  char tmp[1024];
-  address pc;
-
-  while (cur_len < len) {
-    pc = os::get_caller_pc(toSkip + 1);
-    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
-      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
-      cur_len = (int)strlen(buf);
-    } else {
-      buf[cur_len] = '\0';
-      break;
-    }
-    toSkip ++;
-  }
-}
-
-void MemTracker::print_tracker_stats(outputStream* st) {
-  st->print_cr("\nMemory Tracker Stats:");
-  st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
-  st->print_cr("\tthead count = %d", _thread_count);
-  st->print_cr("\tArena instance = %d", Arena::_instance_count);
-  st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
-  st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
-  st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
-  if (_worker_thread != NULL) {
-    st->print_cr("\tWorker thread:");
-    st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
-    st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
-    st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
-  } else {
-    st->print_cr("\tWorker thread is not started");
-  }
-  st->print_cr(" ");
-
-  if (_snapshot != NULL) {
-    _snapshot->print_snapshot_stats(st);
-  } else {
-    st->print_cr("No snapshot");
-  }
-}
-#endif
+};
 
 
-// Tracker Implementation
+void MemTracker::tuning_statistics(outputStream* out) {
+  // NMT statistics
+  StatisticsWalker walker;
+  MallocSiteTable::walk_malloc_site(&walker);
+  walker.completed();
 
-/*
- * Create a tracker.
- * This is a fairly complicated constructor, as it has to make two important decisions:
- *   1) Does it need to take ThreadCritical lock to write tracking record
- *   2) Does it need to pre-reserve a sequence number for the tracking record
- *
- * The rules to determine if ThreadCritical is needed:
- *   1. When NMT is in single-threaded bootstrapping mode, no lock is needed, as the
- *      VM is still in single-threaded mode.
- *   2. For all threads other than JavaThreads, ThreadCritical is needed
- *      to write records to the global recorder.
- *   3. JavaThreads that are no longer safepoint visible also need to take
- *      ThreadCritical, and their records are written to the global
- *      recorder, since these threads are NOT walked by Threads.do_thread().
- *   4. JavaThreads that are running in safepoint-safe states do not stop
- *      for safepoints, so the ThreadCritical lock should be taken to write
- *      memory records.
- *   5. JavaThreads that are running in VM state do not need any lock and
- *      their records are written to per-thread recorders.
- *   6. Threads that have yet to attach a VM 'Thread' need to take
- *      ThreadCritical to write to the global recorder.
- *
- *  The memory operations that need pre-reserved sequence numbers:
- *    memory operations that "release" memory blocks and that can
- *    fail need to pre-reserve a sequence number. They
- *    are realloc, uncommit and release.
- *
- *  The reason for pre-reserving a sequence number is to prevent a race condition:
- *    Thread 1                      Thread 2
- *    <release>
- *                                  <allocate>
- *                                  <write allocate record>
- *   <write release record>
- *   if Thread 2 happens to obtain the memory address Thread 1 just released,
- *   then NMT can mistakenly report that the memory is free.
- *
- *  Notably, free() does not need a pre-reserved sequence number, because the call
- *  does not fail, so we can always write the "release" record before the memory is
- *  actually freed.
- *
- *  For realloc, uncommit and release, the following coding pattern should be used:
- *
- *     MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
- *     ptr = ::realloc(...);
- *     if (ptr == NULL) {
- *       tkr.record(...)
- *     } else {
- *       tkr.discard();
- *     }
- *
- *     MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
- *     if (uncommit(...)) {
- *       tkr.record(...);
- *     } else {
- *       tkr.discard();
- *     }
- *
- *     MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
- *     if (release(...)) {
- *       tkr.record(...);
- *     } else {
- *       tkr.discard();
- *     }
- *
- * Since a pre-reserved sequence number is only good for the generation in which it was
- * acquired, when there is a pending Tracker that has reserved a sequence number, the NMT
- * sync-point has to be skipped to prevent the generation from advancing. This is done by
- * incrementing and decrementing MemTracker::_pending_op_count; when it is > 0, the NMT
- * sync-point is skipped. Not all pre-reservations of sequence numbers increment the
- * pending op count. For JavaThreads that honor safepoints, a safepoint cannot occur during
- * the memory operations, so the pre-reserved sequence number won't cross the generation boundary.
- */
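
To make the race above concrete, here is a standalone replay sketch (hypothetical names; only the ordering argument matters): records are replayed per address in ascending sequence order, so giving the release a pre-reserved, earlier sequence number guarantees that a later allocation of the same address survives the replay.

#include <cassert>
#include <cstdint>
#include <map>
#include <vector>

struct Op { uintptr_t addr; unsigned seq; bool alloc; };

// Replay all records for 'addr' in ascending seq order; the record with
// the highest seq determines whether the block ends up live.
static bool live_after_replay(const std::vector<Op>& ops, uintptr_t addr) {
  std::map<unsigned, bool> by_seq;   // seq -> is-allocation
  for (const Op& o : ops) {
    if (o.addr == addr) by_seq[o.seq] = o.alloc;
  }
  bool live = false;
  for (const std::pair<const unsigned, bool>& e : by_seq) live = e.second;
  return live;
}

int main() {
  // Thread 1 pre-reserved seq 1 for its release; Thread 2 then allocated
  // the same address and drew seq 2. The replay leaves the block live.
  std::vector<Op> with_prereserve;
  with_prereserve.push_back(Op{0xA0, 2, true});   // T2: allocate
  with_prereserve.push_back(Op{0xA0, 1, false});  // T1: release (pre-reserved)
  assert(live_after_replay(with_prereserve, 0xA0));

  // Without pre-reservation the release draws the later seq, and the
  // replay wrongly reports the block as free.
  std::vector<Op> without_prereserve;
  without_prereserve.push_back(Op{0xA0, 1, true});
  without_prereserve.push_back(Op{0xA0, 2, false});
  assert(!live_after_replay(without_prereserve, 0xA0));
  return 0;
}
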
-MemTracker::Tracker::Tracker(MemoryOperation op, Thread* thr) {
-  _op = NoOp;
-  _seq = 0;
-  if (MemTracker::is_on()) {
-    _java_thread = NULL;
-    _op = op;
-
-    // figure out if ThreadCritical lock is needed to write this operation
-    // to MemTracker
-    if (MemTracker::is_single_threaded_bootstrap()) {
-      thr = NULL;
-    } else if (thr == NULL) {
-      // don't use Thread::current(), since it is possible that
-      // the calling thread has yet to attach to a VM 'Thread',
-      // which would result in an assertion failure
-      thr = ThreadLocalStorage::thread();
-    }
-
-    if (thr != NULL) {
-      // Check NMT load
-      MemTracker::check_NMT_load(thr);
-
-      if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) {
-        _java_thread = (JavaThread*)thr;
-        JavaThreadState  state = _java_thread->thread_state();
-        // JavaThreads that are safepoint safe can run through a safepoint,
-        // so ThreadCritical is needed to ensure no thread at the safepoint creates
-        // new records while the records are being gathered and the sequence number is changing
-        _need_thread_critical_lock =
-          SafepointSynchronize::safepoint_safe(_java_thread, state);
-      } else {
-        _need_thread_critical_lock = true;
-      }
-    } else {
-       _need_thread_critical_lock
-         = !MemTracker::is_single_threaded_bootstrap();
-    }
-
-    // see if we need to pre-reserve sequence number for this operation
-    if (_op == Realloc || _op == Uncommit || _op == Release) {
-      if (_need_thread_critical_lock) {
-        ThreadCritical tc;
-        MemTracker::inc_pending_op_count();
-        _seq = SequenceGenerator::next();
-      } else {
-        // for the threads that honor safepoints, no safepoint can occur
-        // during the lifespan of the tracker, so we don't need to increment
-        // the pending op count.
-        _seq = SequenceGenerator::next();
-      }
-    }
-  }
+  out->print_cr("Native Memory Tracking Statistics:");
+  out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets());
+  out->print_cr("             Tracking stack depth: %d", NMT_TrackingStackDepth);
+  NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());)
+  out->print_cr(" ");
+  walker.report_statistics(out);
 }
 
-void MemTracker::Tracker::discard() {
-  if (MemTracker::is_on() && _seq != 0) {
-    if (_need_thread_critical_lock) {
-      ThreadCritical tc;
-      MemTracker::dec_pending_op_count();
-    }
-    _seq = 0;
-  }
-}
-
-
-void MemTracker::Tracker::record(address old_addr, address new_addr, size_t size,
-  MEMFLAGS flags, address pc) {
-  assert(old_addr != NULL && new_addr != NULL, "Sanity check");
-  assert(_op == Realloc || _op == NoOp, "Wrong call");
-  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp && !MemTracker::shutdown_in_progress()) {
-    assert(_seq > 0, "Need pre-reserve sequence number");
-    if (_need_thread_critical_lock) {
-      ThreadCritical tc;
-      // free old address, use pre-reserved sequence number
-      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
-        0, _seq, pc, _java_thread);
-      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
-        size, SequenceGenerator::next(), pc, _java_thread);
-      // decrement MemTracker pending_op_count
-      MemTracker::dec_pending_op_count();
-    } else {
-      // free old address, use pre-reserved sequence number
-      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
-        0, _seq, pc, _java_thread);
-      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
-        size, SequenceGenerator::next(), pc, _java_thread);
-    }
-    _seq = 0;
-  }
-}
-
-void MemTracker::Tracker::record(address addr, size_t size, MEMFLAGS flags, address pc) {
-  // OOM already?
-  if (addr == NULL) return;
-
-  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp && !MemTracker::shutdown_in_progress()) {
-    bool pre_reserved_seq = (_seq != 0);
-    address  pc = CALLER_CALLER_PC;
-    MEMFLAGS orig_flags = flags;
-
-    // OR in the tagging flags
-    switch(_op) {
-      case Malloc:
-        flags |= MemPointerRecord::malloc_tag();
-        break;
-      case Free:
-        flags = MemPointerRecord::free_tag();
-        break;
-      case Realloc:
-        fatal("Use the other Tracker::record()");
-        break;
-      case Reserve:
-      case ReserveAndCommit:
-        flags |= MemPointerRecord::virtual_memory_reserve_tag();
-        break;
-      case Commit:
-        flags = MemPointerRecord::virtual_memory_commit_tag();
-        break;
-      case Type:
-        flags |= MemPointerRecord::virtual_memory_type_tag();
-        break;
-      case Uncommit:
-        assert(pre_reserved_seq, "Need pre-reserve sequence number");
-        flags = MemPointerRecord::virtual_memory_uncommit_tag();
-        break;
-      case Release:
-        assert(pre_reserved_seq, "Need pre-reserve sequence number");
-        flags = MemPointerRecord::virtual_memory_release_tag();
-        break;
-      case ArenaSize:
-        // a bit of a hack here: add a small positive offset to the arena
-        // address for its size record, so the size record sorts
-        // right after the arena record.
-        flags = MemPointerRecord::arena_size_tag();
-        addr += sizeof(void*);
-        break;
-      case StackRelease:
-        flags = MemPointerRecord::virtual_memory_release_tag();
-        break;
-      default:
-        ShouldNotReachHere();
-    }
-
-    // write memory tracking record
-    if (_need_thread_critical_lock) {
-      ThreadCritical tc;
-      if (_seq == 0) _seq = SequenceGenerator::next();
-      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
-      if (_op == ReserveAndCommit) {
-        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
-          size, SequenceGenerator::next(), pc, _java_thread);
-      }
-      if (pre_reserved_seq) MemTracker::dec_pending_op_count();
-    } else {
-      if (_seq == 0) _seq = SequenceGenerator::next();
-      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
-      if (_op == ReserveAndCommit) {
-        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
-          size, SequenceGenerator::next(), pc, _java_thread);
-      }
-    }
-    _seq = 0;
-  }
-}
-
--- a/hotspot/src/share/vm/services/memTracker.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/services/memTracker.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,566 +25,289 @@
 #ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
 #define SHARE_VM_SERVICES_MEM_TRACKER_HPP
 
-#include "utilities/macros.hpp"
+#include "services/nmtCommon.hpp"
+
+class NativeCallStack;
+extern NativeCallStack emptyStack;
 
 #if !INCLUDE_NMT
 
-#include "utilities/ostream.hpp"
+#define CURRENT_PC   emptyStack
+#define CALLER_PC    emptyStack
 
-class BaselineOutputer : public StackObj {
-
+class Tracker : public StackObj {
+ public:
+  Tracker() { }
+  void record(address addr, size_t size) { }
 };
 
-class BaselineTTYOutputer : public BaselineOutputer {
-  public:
-    BaselineTTYOutputer(outputStream* st) { }
+class MemTracker : AllStatic {
+ public:
+  static inline NMT_TrackingLevel tracking_level() { return NMT_off; }
+  static inline void shutdown() { }
+  static inline void init() { }
+  static bool check_launcher_nmt_support(const char* value) { return true; }
+  static bool verify_nmt_option() { return true; }
+
+  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
+    const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; }
+  static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; }
+  static inline size_t malloc_header_size(void* memblock) { return 0; }
+  static inline void* malloc_base(void* memblock) { return memblock; }
+  static inline void* record_free(void* memblock) { return memblock; }
+
+  static inline void record_new_arena(MEMFLAGS flag) { }
+  static inline void record_arena_free(MEMFLAGS flag) { }
+  static inline void record_arena_size_change(int diff, MEMFLAGS flag) { }
+  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
+                       MEMFLAGS flag = mtNone) { }
+  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
+    const NativeCallStack& stack, MEMFLAGS flag = mtNone) { }
+  static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { }
+  static inline Tracker get_virtual_memory_uncommit_tracker() { return Tracker(); }
+  static inline Tracker get_virtual_memory_release_tracker() { return Tracker(); }
+  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { }
+  static inline void record_thread_stack(void* addr, size_t size) { }
+  static inline void release_thread_stack(void* addr, size_t size) { }
+
+  static void final_report(outputStream*) { }
+};
+
+#else
+
+#include "runtime/atomic.hpp"
+#include "runtime/threadCritical.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/virtualMemoryTracker.hpp"
+
+extern volatile bool NMT_stack_walkable;
+
+#define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
+                    NativeCallStack(0, true) : emptyStack)
+#define CALLER_PC  ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ?  \
+                    NativeCallStack(1, true) : emptyStack)
+
+class MemBaseline;
+class Mutex;
+
+// Tracker guards the 'release' semantics of a virtual memory operation: it prevents
+// another thread from obtaining and recording the same region just 'released' by the
+// current thread before the current thread can record the operation.
+class Tracker : public StackObj {
+ public:
+  enum TrackerType {
+     uncommit,
+     release
+  };
+
+ public:
+  Tracker(enum TrackerType type) : _type(type) { }
+  void record(address addr, size_t size);
+ private:
+  enum TrackerType  _type;
+  // Virtual memory tracking data structures are protected by ThreadCritical lock.
+  ThreadCritical    _tc;
 };
 
 class MemTracker : AllStatic {
-  public:
-   enum ShutdownReason {
-      NMT_shutdown_none,     // no shutdown requested
-      NMT_shutdown_user,     // user requested shutdown
-      NMT_normal,            // normal shutdown, process exit
-      NMT_out_of_memory,     // shutdown due to out of memory
-      NMT_initialization,    // shutdown due to initialization failure
-      NMT_use_malloc_only,   // can not combine NMT with UseMallocOnly flag
-      NMT_error_reporting,   // shutdown by vmError::report_and_die()
-      NMT_out_of_generation, // running out of generation queue
-      NMT_sequence_overflow  // overflow the sequence number
-   };
-
-  class Tracker {
-   public:
-    void discard() { }
-
-    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL) { }
-    void record(address old_addr, address new_addr, size_t size,
-      MEMFLAGS flags, address pc = NULL) { }
-  };
-
-  private:
-   static Tracker  _tkr;
-
-
-  public:
-   static inline void init_tracking_options(const char* option_line) { }
-   static inline bool is_on()   { return false; }
-   static const char* reason()  { return "Native memory tracking is not implemented"; }
-   static inline bool can_walk_stack() { return false; }
-
-   static inline void bootstrap_single_thread() { }
-   static inline void bootstrap_multi_thread() { }
-   static inline void start() { }
-
-   static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
-        address pc = 0, Thread* thread = NULL) { }
-   static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { }
-   static inline void record_arena_size(address addr, size_t size) { }
-   static inline void record_virtual_memory_reserve(address addr, size_t size,
-        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
-   static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
-        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
-   static inline void record_virtual_memory_commit(address addr, size_t size,
-        address pc = 0, Thread* thread = NULL) { }
-   static inline void record_virtual_memory_release(address addr, size_t size,
-        Thread* thread = NULL) { }
-   static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
-        Thread* thread = NULL) { }
-   static inline Tracker get_realloc_tracker() { return _tkr; }
-   static inline Tracker get_virtual_memory_uncommit_tracker() { return _tkr; }
-   static inline Tracker get_virtual_memory_release_tracker()  { return _tkr; }
-   static inline bool baseline() { return false; }
-   static inline bool has_baseline() { return false; }
-
-   static inline void set_autoShutdown(bool value) { }
-   static void shutdown(ShutdownReason reason) { }
-   static inline bool shutdown_in_progress() { return false; }
-   static bool print_memory_usage(BaselineOutputer& out, size_t unit,
-            bool summary_only = true) { return false; }
-   static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
-            bool summary_only = true) { return false; }
-
-   static bool wbtest_wait_for_data_merge() { return false; }
-
-   static inline void sync() { }
-   static inline void thread_exiting(JavaThread* thread) { }
-};
-
-
-#else // !INCLUDE_NMT
-
-#include "memory/allocation.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/os.hpp"
-#include "runtime/thread.hpp"
-#include "services/memPtr.hpp"
-#include "services/memRecorder.hpp"
-#include "services/memSnapshot.hpp"
-#include "services/memTrackWorker.hpp"
-
-extern bool NMT_track_callsite;
-
-#ifndef MAX_UNSIGNED_LONG
-#define MAX_UNSIGNED_LONG    (unsigned long)(-1)
-#endif
-
-#ifdef ASSERT
-  #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
-#else
-  #define DEBUG_CALLER_PC  0
-#endif
-
-// The thread closure walks threads to collect per-thread
-// memory recorders at NMT sync point
-class SyncThreadRecorderClosure : public ThreadClosure {
- private:
-  int _thread_count;
-
  public:
-  SyncThreadRecorderClosure() {
-    _thread_count =0;
-  }
-
-  void do_thread(Thread* thread);
-  int  get_thread_count() const {
-    return _thread_count;
-  }
-};
-
-class BaselineOutputer;
-class MemSnapshot;
-class MemTrackWorker;
-class Thread;
-/*
- * MemTracker is the 'gate' class to native memory tracking runtime.
- */
-class MemTracker : AllStatic {
-  friend class GenerationData;
-  friend class MemTrackWorker;
-  friend class MemSnapshot;
-  friend class SyncThreadRecorderClosure;
-
-  // NMT state
-  enum NMTStates {
-    NMT_uninited,                        // not yet initialized
-    NMT_bootstrapping_single_thread,     // bootstrapping, VM is in single thread mode
-    NMT_bootstrapping_multi_thread,      // bootstrapping, VM is about to enter multi-thread mode
-    NMT_started,                         // NMT fully started
-    NMT_shutdown_pending,                // shutdown pending
-    NMT_final_shutdown,                  // in final phase of shutdown
-    NMT_shutdown                         // shutdown
-  };
-
- public:
-  class Tracker : public StackObj {
-    friend class MemTracker;
-   public:
-    enum MemoryOperation {
-      NoOp,                   // no op
-      Malloc,                 // malloc
-      Realloc,                // realloc
-      Free,                   // free
-      Reserve,                // virtual memory reserve
-      Commit,                 // virtual memory commit
-      ReserveAndCommit,       // virtual memory reserve and commit
-      StackAlloc = ReserveAndCommit, // allocate thread stack
-      Type,                   // assign virtual memory type
-      Uncommit,               // virtual memory uncommit
-      Release,                // virtual memory release
-      ArenaSize,              // set arena size
-      StackRelease            // release thread stack
-    };
-
-
-   protected:
-    Tracker(MemoryOperation op, Thread* thr = NULL);
-
-   public:
-    void discard();
-
-    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL);
-    void record(address old_addr, address new_addr, size_t size,
-      MEMFLAGS flags, address pc = NULL);
-
-   private:
-    bool            _need_thread_critical_lock;
-    JavaThread*     _java_thread;
-    MemoryOperation _op;          // memory operation
-    jint            _seq;         // reserved sequence number
-  };
-
-
- public:
-  // native memory tracking level
-  enum NMTLevel {
-    NMT_off,              // native memory tracking is off
-    NMT_summary,          // don't track callsite
-    NMT_detail            // track callsite also
-  };
-
-   enum ShutdownReason {
-     NMT_shutdown_none,     // no shutdown requested
-     NMT_shutdown_user,     // user requested shutdown
-     NMT_normal,            // normal shutdown, process exit
-     NMT_out_of_memory,     // shutdown due to out of memory
-     NMT_initialization,    // shutdown due to initialization failure
-     NMT_use_malloc_only,   // can not combine NMT with UseMallocOnly flag
-     NMT_error_reporting,   // shutdown by vmError::report_and_die()
-     NMT_out_of_generation, // running out of generation queue
-     NMT_sequence_overflow  // overflow the sequence number
-   };
-
- public:
-  // initialize NMT tracking level from command line options, called
-   // from VM command line parsing code
-  static void init_tracking_options(const char* option_line);
-
-  // if NMT is enabled to record memory activities
-  static inline bool is_on() {
-    return (_tracking_level >= NMT_summary &&
-      _state >= NMT_bootstrapping_single_thread);
-  }
-
-  static inline enum NMTLevel tracking_level() {
+  static inline NMT_TrackingLevel tracking_level() {
+    if (_tracking_level == NMT_unknown) {
+      // No fencing is needed here, since the JVM is still in
+      // single-threaded mode.
+      _tracking_level = init_tracking_level();
+      _cmdline_tracking_level = _tracking_level;
+    }
     return _tracking_level;
   }
 
-  // user readable reason for shutting down NMT
-  static const char* reason() {
-    switch(_reason) {
-      case NMT_shutdown_none:
-        return "Native memory tracking is not enabled";
-      case NMT_shutdown_user:
-        return "Native memory tracking has been shutdown by user";
-      case NMT_normal:
-        return "Native memory tracking has been shutdown due to process exiting";
-      case NMT_out_of_memory:
-        return "Native memory tracking has been shutdown due to out of native memory";
-      case NMT_initialization:
-        return "Native memory tracking failed to initialize";
-      case NMT_error_reporting:
-        return "Native memory tracking has been shutdown due to error reporting";
-      case NMT_out_of_generation:
-        return "Native memory tracking has been shutdown due to running out of generation buffer";
-      case NMT_sequence_overflow:
-        return "Native memory tracking has been shutdown due to overflow the sequence number";
-      case NMT_use_malloc_only:
-        return "Native memory tracking is not supported when UseMallocOnly is on";
-      default:
-        ShouldNotReachHere();
-        return NULL;
-    }
+  // Late initialization, for work that cannot be done in
+  // init_tracking_level(), which must not malloc any memory.
+  static void init();
+
+  // Shutdown native memory tracking
+  static void shutdown();
+
+  // Verify the native memory tracking command line option.
+  // This check allows the JVM to detect whether a compatible
+  // launcher is used.
+  // With an incompatible launcher, NMT may not be able to start,
+  // even if it is enabled by a command line option.
+  // A warning message should be printed when this is encountered.
+  static bool check_launcher_nmt_support(const char* value);
+
+  // This method checks the native memory tracking environment
+  // variable value passed by the launcher.
+  // The launcher is only obligated to pass the native memory
+  // tracking option value, not to validate it, and it may drop
+  // the option from the command line once it has set up the
+  // environment variable, so NMT has to catch a bad value here.
+  static bool verify_nmt_option();
+
+  // Transition the tracking level to specified level
+  static bool transition_to(NMT_TrackingLevel level);
+
+  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
+    const NativeCallStack& stack, NMT_TrackingLevel level) {
+    return MallocTracker::record_malloc(mem_base, size, flag, stack, level);
+  }
+
+  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
+    return MallocTracker::malloc_header_size(level);
   }
 
-  // test if we can walk native stack
-  static bool can_walk_stack() {
-  // native stack is not walkable during bootstrapping on sparc
-#if defined(SPARC)
-    return (_state == NMT_started);
-#else
-    return (_state >= NMT_bootstrapping_single_thread && _state  <= NMT_started);
-#endif
+  static size_t malloc_header_size(void* memblock) {
+    if (tracking_level() != NMT_off) {
+      return MallocTracker::get_header_size(memblock);
+    }
+    return 0;
+  }
+
+  // Returns the malloc base address, which is the starting address
+  // of the malloc tracking header if tracking is enabled.
+  // Otherwise, it returns the same address.
+  static void* malloc_base(void* memblock);
+
+  // Record a free() and return the malloc base address
+  static inline void* record_free(void* memblock) {
+    return MallocTracker::record_free(memblock);
   }
 
-  // if native memory tracking tracks callsite
-  static inline bool track_callsite() { return _tracking_level == NMT_detail; }
+
+  // Record creation of an arena
+  static inline void record_new_arena(MEMFLAGS flag) {
+    if (tracking_level() < NMT_summary) return;
+    MallocTracker::record_new_arena(flag);
+  }
+
+  // Record destruction of an arena
+  static inline void record_arena_free(MEMFLAGS flag) {
+    if (tracking_level() < NMT_summary) return;
+    MallocTracker::record_arena_free(flag);
+  }
 
-  // NMT automatically shuts itself down under extreme situation by default.
-  // When the value is set to false,  NMT will try its best to stay alive,
-  // even it has to slow down VM.
-  static inline void set_autoShutdown(bool value) {
-    AutoShutdownNMT = value;
-    if (AutoShutdownNMT && _slowdown_calling_thread) {
-      _slowdown_calling_thread = false;
+  // Record arena size change. Arena size is the total size of all
+  // arena chunks backing the arena.
+  static inline void record_arena_size_change(int diff, MEMFLAGS flag) {
+    if (tracking_level() < NMT_summary) return;
+    MallocTracker::record_arena_size_change(diff, flag);
+  }
+
+  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
+    MEMFLAGS flag = mtNone) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      ThreadCritical tc;
+      // Recheck to avoid a potential race during NMT shutdown
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
     }
   }
 
-  // shutdown native memory tracking capability. Native memory tracking
-  // can be shutdown by VM when it encounters low memory scenarios.
-  // Memory tracker should gracefully shutdown itself, and preserve the
-  // latest memory statistics for post morten diagnosis.
-  static void shutdown(ShutdownReason reason);
-
-  // if there is shutdown requested
-  static inline bool shutdown_in_progress() {
-    return (_state >= NMT_shutdown_pending);
-  }
-
-  // bootstrap native memory tracking, so it can start to collect raw data
-  // before worker thread can start
-
-  // the first phase of bootstrapping, when VM still in single-threaded mode
-  static void bootstrap_single_thread();
-  // the second phase of bootstrapping, VM is about or already in multi-threaded mode
-  static void bootstrap_multi_thread();
-
-
-  // start() has to be called when VM still in single thread mode, but after
-  // command line option parsing is done.
-  static void start();
-
-  // record a 'malloc' call
-  static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
-                            address pc = 0, Thread* thread = NULL) {
-    Tracker tkr(Tracker::Malloc, thread);
-    tkr.record(addr, size, flags, pc);
-  }
-  // record a 'free' call
-  static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
-    Tracker tkr(Tracker::Free, thread);
-    tkr.record(addr, 0, flags, DEBUG_CALLER_PC);
-  }
-
-  static inline void record_arena_size(address addr, size_t size) {
-    Tracker tkr(Tracker::ArenaSize);
-    tkr.record(addr, size);
-  }
-
-  // record a virtual memory 'reserve' call
-  static inline void record_virtual_memory_reserve(address addr, size_t size,
-                     MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
-    assert(size > 0, "Sanity check");
-    Tracker tkr(Tracker::Reserve, thread);
-    tkr.record(addr, size, flags, pc);
-  }
-
-  static inline void record_thread_stack(address addr, size_t size, Thread* thr,
-                           address pc = 0) {
-    Tracker tkr(Tracker::StackAlloc, thr);
-    tkr.record(addr, size, mtThreadStack, pc);
-  }
-
-  static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
-    Tracker tkr(Tracker::StackRelease, thr);
-    tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC);
-  }
-
-  // record a virtual memory 'commit' call
-  static inline void record_virtual_memory_commit(address addr, size_t size,
-                            address pc, Thread* thread = NULL) {
-    Tracker tkr(Tracker::Commit, thread);
-    tkr.record(addr, size, mtNone, pc);
-  }
-
-  static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
-    MEMFLAGS flags, address pc, Thread* thread = NULL) {
-    Tracker tkr(Tracker::ReserveAndCommit, thread);
-    tkr.record(addr, size, flags, pc);
-  }
-
-  static inline void record_virtual_memory_release(address addr, size_t size,
-      Thread* thread = NULL) {
-    if (is_on()) {
-      Tracker tkr(Tracker::Release, thread);
-      tkr.record(addr, size);
+  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
+    const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      ThreadCritical tc;
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::add_reserved_region((address)addr, size,
+        stack, flag, true);
     }
   }
 
-  // record memory type on virtual memory base address
-  static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
-                            Thread* thread = NULL) {
-    Tracker tkr(Tracker::Type);
-    tkr.record(base, 0, flags);
-  }
-
-  // Get memory trackers for memory operations that can result race conditions.
-  // The memory tracker has to be obtained before realloc, virtual memory uncommit
-  // and virtual memory release, and call tracker.record() method if operation
-  // succeeded, or tracker.discard() to abort the tracking.
-  static inline Tracker get_realloc_tracker() {
-    return Tracker(Tracker::Realloc);
+  static inline void record_virtual_memory_commit(void* addr, size_t size,
+    const NativeCallStack& stack) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      ThreadCritical tc;
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
+    }
   }
 
   static inline Tracker get_virtual_memory_uncommit_tracker() {
-    return Tracker(Tracker::Uncommit);
+    assert(tracking_level() >= NMT_summary, "Check by caller");
+    return Tracker(Tracker::uncommit);
   }
 
   static inline Tracker get_virtual_memory_release_tracker() {
-    return Tracker(Tracker::Release);
-  }
-
-
-  // create memory baseline of current memory snapshot
-  static bool baseline();
-  // is there a memory baseline
-  static bool has_baseline() {
-    return _baseline.baselined();
+    assert(tracking_level() >= NMT_summary, "Check by caller");
+    return Tracker(Tracker::release);
   }
 
-  // print memory usage from current snapshot
-  static bool print_memory_usage(BaselineOutputer& out, size_t unit,
-           bool summary_only = true);
-  // compare memory usage between current snapshot and baseline
-  static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
-           bool summary_only = true);
-
-  // the version for whitebox testing support, it ensures that all memory
-  // activities before this method call, are reflected in the snapshot
-  // database.
-  static bool wbtest_wait_for_data_merge();
-
-  // sync is called within global safepoint to synchronize nmt data
-  static void sync();
-
-  // called when a thread is about to exit
-  static void thread_exiting(JavaThread* thread);
-
-  // retrieve global snapshot
-  static MemSnapshot* get_snapshot() {
-    if (shutdown_in_progress()) {
-      return NULL;
+  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      ThreadCritical tc;
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
     }
-    return _snapshot;
   }
 
-  // print tracker stats
-  NOT_PRODUCT(static void print_tracker_stats(outputStream* st);)
-  NOT_PRODUCT(static void walk_stack(int toSkip, char* buf, int len);)
-
- private:
-  // start native memory tracking worker thread
-  static bool start_worker(MemSnapshot* snapshot);
-
-  // called by worker thread to complete shutdown process
-  static void final_shutdown();
-
- protected:
-  // retrieve per-thread recorder of the specified thread.
-  // if the recorder is full, it will be enqueued to overflow
-  // queue, a new recorder is acquired from recorder pool or a
-  // new instance is created.
-  // when thread == NULL, it means global recorder
-  static MemRecorder* get_thread_recorder(JavaThread* thread);
-
-  // per-thread recorder pool
-  static void release_thread_recorder(MemRecorder* rec);
-  static void delete_all_pooled_recorders();
-
-  // pending recorder queue. Recorders are queued to pending queue
-  // when they are overflowed or collected at nmt sync point.
-  static void enqueue_pending_recorder(MemRecorder* rec);
-  static MemRecorder* get_pending_recorders();
-  static void delete_all_pending_recorders();
-
-  // write a memory tracking record in recorder
-  static void write_tracking_record(address addr, MEMFLAGS type,
-    size_t size, jint seq, address pc, JavaThread* thread);
-
-  static bool is_single_threaded_bootstrap() {
-    return _state == NMT_bootstrapping_single_thread;
+  static inline void record_thread_stack(void* addr, size_t size) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      // use the thread stack malloc slot for bookkeeping the number of threads
+      MallocMemorySummary::record_malloc(0, mtThreadStack);
+      record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack);
+    }
   }
 
-  static void check_NMT_load(Thread* thr) {
-    assert(thr != NULL, "Sanity check");
-    if (_slowdown_calling_thread && thr != _worker_thread) {
-#ifdef _WINDOWS
-      // On Windows, os::NakedYield() does not work as well
-      // as short sleep.
-      os::naked_short_sleep(1);
-#else
-      os::naked_yield();
-#endif
+  static inline void release_thread_stack(void* addr, size_t size) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      // use the thread stack malloc slot for bookkeeping the number of threads
+      MallocMemorySummary::record_free(0, mtThreadStack);
+      ThreadCritical tc;
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::remove_released_region((address)addr, size);
     }
   }
 
-  static void inc_pending_op_count() {
-    Atomic::inc(&_pending_op_count);
-  }
+  // The query lock is used to synchronize access to tracking data.
+  // So far it is only used by the JCmd query, but it may be used
+  // by other tools.
+  static inline Mutex* query_lock() { return _query_lock; }
 
-  static void dec_pending_op_count() {
-    Atomic::dec(&_pending_op_count);
-    assert(_pending_op_count >= 0, "Sanity check");
+  // Make a final report and shut down.
+  // This function generates the summary report without creating snapshots,
+  // to avoid additional memory allocation. It uses the native memory summary
+  // counters and adjusts them; once the adjustment is made, the counters are
+  // no longer accurate. As a result, this function should only be used for
+  // final reporting before shutting down.
+  static void final_report(outputStream*);
+
+  // Stored baseline
+  static inline MemBaseline& get_baseline() {
+    return _baseline;
   }
 
-
- private:
-  // retrieve a pooled memory record or create new one if there is not
-  // one available
-  static MemRecorder* get_new_or_pooled_instance();
-  static void create_memory_record(address addr, MEMFLAGS type,
-                   size_t size, address pc, Thread* thread);
-  static void create_record_in_recorder(address addr, MEMFLAGS type,
-                   size_t size, address pc, JavaThread* thread);
-
-  static void set_current_processing_generation(unsigned long generation) {
-    _worker_thread_idle = false;
-    _processing_generation = generation;
+  static NMT_TrackingLevel cmdline_tracking_level() {
+    return _cmdline_tracking_level;
   }
 
-  static void report_worker_idle() {
-    _worker_thread_idle = true;
-  }
+  static void tuning_statistics(outputStream* out);
 
  private:
-  // global memory snapshot
-  static MemSnapshot*     _snapshot;
-
-  // a memory baseline of snapshot
-  static MemBaseline      _baseline;
-
-  // query lock
-  static Mutex*           _query_lock;
-
-  // a thread can start to allocate memory before it is attached
-  // to VM 'Thread', those memory activities are recorded here.
-  // ThreadCritical is required to guard this global recorder.
-  static MemRecorder* volatile _global_recorder;
-
-  // main thread id
-  debug_only(static intx   _main_thread_tid;)
-
-  // pending recorders to be merged
-  static MemRecorder* volatile     _merge_pending_queue;
-
-  NOT_PRODUCT(static volatile jint   _pending_recorder_count;)
-
-  // pooled memory recorders
-  static MemRecorder* volatile     _pooled_recorders;
-
-  // memory recorder pool management, uses following
-  // counter to determine if a released memory recorder
-  // should be pooled
+  static NMT_TrackingLevel init_tracking_level();
 
-  // latest thread count
-  static int               _thread_count;
-  // pooled recorder count
-  static volatile jint     _pooled_recorder_count;
-
-
-  // worker thread to merge pending recorders into snapshot
-  static MemTrackWorker*  _worker_thread;
-
-  // how many safepoints we skipped without entering sync point
-  static int              _sync_point_skip_count;
-
-  // if the tracker is properly intialized
-  static bool             _is_tracker_ready;
-  // tracking level (off, summary and detail)
-  static enum NMTLevel    _tracking_level;
-
-  // current nmt state
-  static volatile enum NMTStates   _state;
-  // the reason for shutting down nmt
-  static enum ShutdownReason       _reason;
-  // the generation that NMT is processing
-  static volatile unsigned long    _processing_generation;
-  // although NMT is still procesing current generation, but
-  // there is not more recorder to process, set idle state
-  static volatile bool             _worker_thread_idle;
-
-  // if NMT should slow down calling thread to allow
-  // worker thread to catch up
-  static volatile bool             _slowdown_calling_thread;
-
-  // pending memory op count.
-  // Certain memory ops need to pre-reserve sequence number
-  // before memory operation can happen to avoid race condition.
-  // See MemTracker::Tracker for detail
-  static volatile jint             _pending_op_count;
+ private:
+  // Tracking level
+  static volatile NMT_TrackingLevel   _tracking_level;
+  // Whether the NMT option value passed by the launcher through
+  // the environment variable is valid
+  static bool                         _is_nmt_env_valid;
+  // Command line tracking level
+  static NMT_TrackingLevel            _cmdline_tracking_level;
+  // Stored baseline
+  static MemBaseline      _baseline;
+  // Query lock
+  static Mutex*           _query_lock;
 };
 
-#endif // !INCLUDE_NMT
+#endif // INCLUDE_NMT
 
 #endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP
+
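
To make the new malloc-path contract above concrete, here is a minimal sketch of an
allocator wrapper threading through it. The wrapper names (nmt_malloc, nmt_free) and
the direct ::malloc/::free calls are illustrative assumptions; the real entry points
live in the os:: allocation code, which is not part of this hunk.

    // Sketch only: shows the record_malloc()/record_free() contract.
    void* nmt_malloc(size_t size, MEMFLAGS flag, const NativeCallStack& stack) {
      NMT_TrackingLevel level = MemTracker::tracking_level();
      // allocate room for the payload plus the tracking header, if any
      void* mem_base = ::malloc(size + MemTracker::malloc_header_size(level));
      if (mem_base == NULL) return NULL;
      // returns the user-visible pointer, located past the tracking header
      return MemTracker::record_malloc(mem_base, size, flag, stack, level);
    }

    void nmt_free(void* memblock) {
      // record_free() returns the malloc base address to hand back to ::free()
      ::free(MemTracker::record_free(memblock));
    }

The uncommit/release path uses the Tracker guard instead: construct the tracker (its
ThreadCritical member holds the lock for the tracker's lifetime), perform the actual
OS release, then call record(), so no other thread can reserve and record the same
range in between.
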
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/nmtCommon.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "services/nmtCommon.hpp"
+
+const char* NMTUtil::_memory_type_names[] = {
+  "Java Heap",
+  "Class",
+  "Thread",
+  "Thread Stack",
+  "Code",
+  "GC",
+  "Compiler",
+  "Internal",
+  "Other",
+  "Symbol",
+  "Native Memory Tracking",
+  "Shared class space",
+  "Arena Chunk",
+  "Test",
+  "Tracing",
+  "Unknown"
+};
+
+
+const char* NMTUtil::scale_name(size_t scale) {
+  switch(scale) {
+    case K: return "KB";
+    case M: return "MB";
+    case G: return "GB";
+  }
+  ShouldNotReachHere();
+  return NULL;
+}
+
+size_t NMTUtil::scale_from_name(const char* scale) {
+  assert(scale != NULL, "Null pointer check");
+  if (strncmp(scale, "KB", 2) == 0 ||
+      strncmp(scale, "kb", 2) == 0) {
+    return K;
+  } else if (strncmp(scale, "MB", 2) == 0 ||
+             strncmp(scale, "mb", 2) == 0) {
+    return M;
+  } else if (strncmp(scale, "GB", 2) == 0 ||
+             strncmp(scale, "gb", 2) == 0) {
+    return G;
+  } else {
+    return 0; // Invalid value
+  }
+}
+
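
A quick usage sketch for the scale helpers above (illustrative only; the tty output
line is just for demonstration). scale_from_name() returns 0 for an unrecognized
name, so callers can reject bad input before dividing by the unit:

    size_t unit = NMTUtil::scale_from_name("MB");   // M here; 0 means invalid
    if (unit != 0) {
      tty->print_cr("reporting in %s", NMTUtil::scale_name(unit));  // prints "MB"
    }
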
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/nmtCommon.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_NMT_COMMON_HPP
+#define SHARE_VM_SERVICES_NMT_COMMON_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+#define CALC_OBJ_SIZE_IN_TYPE(obj, type) (align_size_up_(sizeof(obj), sizeof(type))/sizeof(type))
+
+// Data type for memory counters
+#ifdef _LP64
+  typedef jlong    MemoryCounterType;
+#else
+  typedef jint     MemoryCounterType;
+#endif
+
+// Native memory tracking level
+enum NMT_TrackingLevel {
+  NMT_unknown = 0xFF,
+  NMT_off     = 0x00,
+  NMT_minimal = 0x01,
+  NMT_summary = 0x02,
+  NMT_detail  = 0x03
+};
+
+// Number of stack frames to capture. This is a
+// build-time decision.
+const int NMT_TrackingStackDepth = 4;
+
+class NativeCallStack;
+extern NativeCallStack emptyStack;
+
+// A few common utilities for native memory tracking
+class NMTUtil : AllStatic {
+ public:
+  // Map memory type to index
+  static inline int flag_to_index(MEMFLAGS flag) {
+    return (flag & 0xff);
+  }
+
+  // Map memory type to human readable name
+  static const char* flag_to_name(MEMFLAGS flag) {
+    return _memory_type_names[flag_to_index(flag)];
+  }
+
+  // Map an index to memory type
+  static MEMFLAGS index_to_flag(int index) {
+    return (MEMFLAGS)index;
+  }
+
+  // Memory size scale
+  static const char* scale_name(size_t scale);
+  static size_t scale_from_name(const char* scale);
+
+  // Translate memory size in specified scale
+  static size_t amount_in_scale(size_t amount, size_t scale) {
+    return (amount + scale / 2) / scale;
+  }
+ private:
+  static const char* _memory_type_names[mt_number_of_types];
+};
+
+
+#endif
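
One detail worth noting in NMTUtil above: amount_in_scale() adds half the scale
before dividing, so sizes are rounded to the nearest unit rather than truncated.
A worked example, assuming the usual K == 1024:

    NMTUtil::amount_in_scale(1536, K);  // (1536 + 512) / 1024 == 2, not 1
    NMTUtil::amount_in_scale(500,  K);  // (500  + 512) / 1024 == 0
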
--- a/hotspot/src/share/vm/services/nmtDCmd.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/services/nmtDCmd.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -22,6 +22,8 @@
  *
  */
 #include "precompiled.hpp"
+
+#include "runtime/mutexLocker.hpp"
 #include "services/nmtDCmd.hpp"
 #include "services/memReporter.hpp"
 #include "services/memTracker.hpp"
@@ -49,13 +51,8 @@
   _shutdown("shutdown", "request runtime to shutdown itself and free the " \
             "memory used by runtime.",
             "BOOLEAN", false, "false"),
-  _auto_shutdown("autoShutdown", "automatically shutdown itself under "    \
-            "stress situation",
-            "BOOLEAN", true, "true"),
-#ifndef PRODUCT
-  _debug("debug", "print tracker statistics. Debug only, not thread safe", \
+  _statistics("statistics", "print tracker statistics for tuning purpose.", \
             "BOOLEAN", false, "false"),
-#endif
   _scale("scale", "Memory usage in which scale, KB, MB or GB",
        "STRING", false, "KB") {
   _dcmdparser.add_dcmd_option(&_summary);
@@ -64,25 +61,30 @@
   _dcmdparser.add_dcmd_option(&_summary_diff);
   _dcmdparser.add_dcmd_option(&_detail_diff);
   _dcmdparser.add_dcmd_option(&_shutdown);
-  _dcmdparser.add_dcmd_option(&_auto_shutdown);
-#ifndef PRODUCT
-  _dcmdparser.add_dcmd_option(&_debug);
-#endif
+  _dcmdparser.add_dcmd_option(&_statistics);
   _dcmdparser.add_dcmd_option(&_scale);
 }
 
+
+size_t NMTDCmd::get_scale(const char* scale) const {
+  if (scale == NULL) return 0;
+  return NMTUtil::scale_from_name(scale);
+}
+
 void NMTDCmd::execute(DCmdSource source, TRAPS) {
+  // Check NMT state: native memory tracking has to be on
+  if (MemTracker::tracking_level() == NMT_off) {
+    output()->print_cr("Native memory tracking is not enabled");
+    return;
+  } else if (MemTracker::tracking_level() == NMT_minimal) {
+     output()->print_cr("Native memory tracking has been shutdown");
+     return;
+  }
+
   const char* scale_value = _scale.value();
-  size_t scale_unit;
-  if (strcmp(scale_value, "KB") == 0 || strcmp(scale_value, "kb") == 0) {
-    scale_unit = K;
-  } else if (strcmp(scale_value, "MB") == 0 ||
-             strcmp(scale_value, "mb") == 0) {
-    scale_unit = M;
-  } else if (strcmp(scale_value, "GB") == 0 ||
-             strcmp(scale_value, "gb") == 0) {
-    scale_unit = G;
-  } else {
+  size_t scale_unit = get_scale(scale_value);
+  if (scale_unit == 0) {
     output()->print_cr("Incorrect scale value: %s", scale_value);
     return;
   }
@@ -94,19 +96,11 @@
   if (_summary_diff.is_set() && _summary_diff.value()) { ++nopt; }
   if (_detail_diff.is_set() && _detail_diff.value()) { ++nopt; }
   if (_shutdown.is_set() && _shutdown.value()) { ++nopt; }
-  if (_auto_shutdown.is_set()) { ++nopt; }
-
-#ifndef PRODUCT
-  if (_debug.is_set() && _debug.value()) { ++nopt; }
-#endif
+  if (_statistics.is_set() && _statistics.value()) { ++nopt; }
 
   if (nopt > 1) {
       output()->print_cr("At most one of the following option can be specified: " \
-        "summary, detail, baseline, summary.diff, detail.diff, shutdown"
-#ifndef PRODUCT
-        ", debug"
-#endif
-      );
+        "summary, detail, baseline, summary.diff, detail.diff, shutdown");
       return;
   } else if (nopt == 0) {
     if (_summary.is_set()) {
@@ -117,53 +111,47 @@
     }
   }
 
-#ifndef PRODUCT
-  if (_debug.value()) {
-    output()->print_cr("debug command is NOT thread-safe, may cause crash");
-    MemTracker::print_tracker_stats(output());
-    return;
-  }
-#endif
-
-  // native memory tracking has to be on
-  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
-    // if it is not on, what's the reason?
-    output()->print_cr("%s", MemTracker::reason());
-    return;
-  }
+  // Serialize NMT query
+  MutexLocker locker(MemTracker::query_lock());
 
   if (_summary.value()) {
-    BaselineTTYOutputer outputer(output());
-    MemTracker::print_memory_usage(outputer, scale_unit, true);
+    report(true, scale_unit);
   } else if (_detail.value()) {
-    BaselineTTYOutputer outputer(output());
-    MemTracker::print_memory_usage(outputer, scale_unit, false);
+    if (!check_detail_tracking_level(output())) {
+      return;
+    }
+    report(false, scale_unit);
   } else if (_baseline.value()) {
-    if (MemTracker::baseline()) {
-      output()->print_cr("Successfully baselined.");
+    MemBaseline& baseline = MemTracker::get_baseline();
+    if (!baseline.baseline(MemTracker::tracking_level() != NMT_detail)) {
+      output()->print_cr("Baseline failed");
     } else {
-      output()->print_cr("Baseline failed.");
+      output()->print_cr("Baseline succeeded");
     }
   } else if (_summary_diff.value()) {
-    if (MemTracker::has_baseline()) {
-      BaselineTTYOutputer outputer(output());
-      MemTracker::compare_memory_usage(outputer, scale_unit, true);
+    MemBaseline& baseline = MemTracker::get_baseline();
+    if (baseline.baseline_type() >= MemBaseline::Summary_baselined) {
+      report_diff(true, scale_unit);
     } else {
-      output()->print_cr("No baseline to compare, run 'baseline' command first");
+      output()->print_cr("No baseline for comparison");
     }
   } else if (_detail_diff.value()) {
-    if (MemTracker::has_baseline()) {
-      BaselineTTYOutputer outputer(output());
-      MemTracker::compare_memory_usage(outputer, scale_unit, false);
+    if (!check_detail_tracking_level(output())) {
+      return;
+    }
+    MemBaseline& baseline = MemTracker::get_baseline();
+    if (baseline.baseline_type() == MemBaseline::Detail_baselined) {
+      report_diff(false, scale_unit);
     } else {
-      output()->print_cr("No baseline to compare to, run 'baseline' command first");
+      output()->print_cr("No detail baseline for comparison");
     }
   } else if (_shutdown.value()) {
-    MemTracker::shutdown(MemTracker::NMT_shutdown_user);
-    output()->print_cr("Shutdown is in progress, it will take a few moments to " \
-      "completely shutdown");
-  } else if (_auto_shutdown.is_set()) {
-    MemTracker::set_autoShutdown(_auto_shutdown.value());
+    MemTracker::shutdown();
+    output()->print_cr("Native memory tracking has been turned off");
+  } else if (_statistics.value()) {
+    if (check_detail_tracking_level(output())) {
+      MemTracker::tuning_statistics(output());
+    }
   } else {
     ShouldNotReachHere();
     output()->print_cr("Unknown command");
@@ -181,3 +169,46 @@
   }
 }
 
+void NMTDCmd::report(bool summaryOnly, size_t scale_unit) {
+  MemBaseline baseline;
+  if (baseline.baseline(summaryOnly)) {
+    if (summaryOnly) {
+      MemSummaryReporter rpt(baseline, output(), scale_unit);
+      rpt.report();
+    } else {
+      MemDetailReporter rpt(baseline, output(), scale_unit);
+      rpt.report();
+    }
+  }
+}
+
+void NMTDCmd::report_diff(bool summaryOnly, size_t scale_unit) {
+  MemBaseline& early_baseline = MemTracker::get_baseline();
+  assert(early_baseline.baseline_type() != MemBaseline::Not_baselined,
+    "Not yet baselined");
+  assert(summaryOnly || early_baseline.baseline_type() == MemBaseline::Detail_baselined,
+    "Not a detail baseline");
+
+  MemBaseline baseline;
+  if (baseline.baseline(summaryOnly)) {
+    if (summaryOnly) {
+      MemSummaryDiffReporter rpt(early_baseline, baseline, output(), scale_unit);
+      rpt.report_diff();
+    } else {
+      MemDetailDiffReporter rpt(early_baseline, baseline, output(), scale_unit);
+      rpt.report_diff();
+    }
+  }
+}
+
+bool NMTDCmd::check_detail_tracking_level(outputStream* out) {
+  if (MemTracker::tracking_level() == NMT_detail) {
+    return true;
+  } else if (MemTracker::cmdline_tracking_level() == NMT_detail) {
+    out->print_cr("Tracking level has been downgraded due to lack of resources");
+    return false;
+  } else {
+    out->print_cr("Detail tracking is not enabled");
+    return false;
+  }
+}
--- a/hotspot/src/share/vm/services/nmtDCmd.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/services/nmtDCmd.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,12 @@
 #ifndef SHARE_VM_SERVICES_NMT_DCMD_HPP
 #define SHARE_VM_SERVICES_NMT_DCMD_HPP
 
+#if INCLUDE_NMT
+
 #include "services/diagnosticArgument.hpp"
 #include "services/diagnosticFramework.hpp"
+#include "services/memBaseline.hpp"
+#include "services/mallocTracker.hpp"
 
 /**
  * Native memory tracking DCmd implementation
@@ -39,10 +43,7 @@
   DCmdArgument<bool>  _summary_diff;
   DCmdArgument<bool>  _detail_diff;
   DCmdArgument<bool>  _shutdown;
-  DCmdArgument<bool>  _auto_shutdown;
-#ifndef PRODUCT
-  DCmdArgument<bool>  _debug;
-#endif
+  DCmdArgument<bool>  _statistics;
   DCmdArgument<char*> _scale;
 
  public:
@@ -61,6 +62,17 @@
   }
   static int num_arguments();
   virtual void execute(DCmdSource source, TRAPS);
+
+ private:
+  void report(bool summaryOnly, size_t scale);
+  void report_diff(bool summaryOnly, size_t scale);
+
+  size_t get_scale(const char* scale) const;
+
+  // check if NMT is running at the detail tracking level
+  bool check_detail_tracking_level(outputStream* out);
 };
 
+#endif // INCLUDE_NMT
+
 #endif // SHARE_VM_SERVICES_NMT_DCMD_HPP
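
With the autoShutdown and debug options gone, the remaining command surface maps
directly onto the report()/report_diff()/tuning_statistics() paths above. Typical
invocations, assuming the command stays registered as VM.native_memory as in the
existing JDK tooling:

    jcmd <pid> VM.native_memory summary scale=MB
    jcmd <pid> VM.native_memory baseline
    jcmd <pid> VM.native_memory summary.diff
    jcmd <pid> VM.native_memory statistics    (requires detail tracking level)
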
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/virtualMemoryTracker.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,448 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "runtime/threadCritical.hpp"
+#include "services/virtualMemoryTracker.hpp"
+
+size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
+
+void VirtualMemorySummary::initialize() {
+  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
+  // Use placement operator new to initialize static data area.
+  ::new ((void*)_snapshot) VirtualMemorySnapshot();
+}
+
+SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> VirtualMemoryTracker::_reserved_regions;
+
+int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
+  return r1.compare(r2);
+}
+
+int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
+  return r1.compare(r2);
+}
+
+bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
+  assert(addr != NULL, "Invalid address");
+  assert(size > 0, "Invalid size");
+  assert(contain_region(addr, size), "Not contain this region");
+
+  if (all_committed()) return true;
+
+  CommittedMemoryRegion committed_rgn(addr, size, stack);
+  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
+  if (node != NULL) {
+    CommittedMemoryRegion* rgn = node->data();
+    if (rgn->same_region(addr, size)) {
+      return true;
+    }
+
+    if (rgn->adjacent_to(addr, size)) {
+      // check whether the next region covers this committed region;
+      // the regions may not have been merged due to differing call stacks
+      LinkedListNode<CommittedMemoryRegion>* next =
+        node->next();
+      if (next != NULL && next->data()->contain_region(addr, size)) {
+        if (next->data()->same_region(addr, size)) {
+          next->data()->set_call_stack(stack);
+        }
+        return true;
+      }
+      if (rgn->call_stack()->equals(stack)) {
+        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
+        // the two adjacent regions have the same call stack; merge them
+        rgn->expand_region(addr, size);
+        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
+        return true;
+      }
+      VirtualMemorySummary::record_committed_memory(size, flag());
+      if (rgn->base() > addr) {
+        return _committed_regions.insert_before(committed_rgn, node) != NULL;
+      } else {
+        return _committed_regions.insert_after(committed_rgn, node) != NULL;
+      }
+    }
+    assert(rgn->contain_region(addr, size), "Must cover this region");
+    return true;
+  } else {
+    // New committed region
+    VirtualMemorySummary::record_committed_memory(size, flag());
+    return add_committed_region(committed_rgn);
+  }
+}
+
+void ReservedMemoryRegion::set_all_committed(bool b) {
+  if (all_committed() != b) {
+    _all_committed = b;
+    if (b) {
+      VirtualMemorySummary::record_committed_memory(size(), flag());
+    }
+  }
+}
+
+bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
+  address addr, size_t size) {
+  assert(addr != NULL, "Invalid address");
+  assert(size > 0, "Invalid size");
+
+  CommittedMemoryRegion* rgn = node->data();
+  assert(rgn->contain_region(addr, size), "Has to be contained");
+  assert(!rgn->same_region(addr, size), "Can not be the same region");
+
+  if (rgn->base() == addr ||
+      rgn->end() == addr + size) {
+    rgn->exclude_region(addr, size);
+    return true;
+  } else {
+    // split this region
+    address top = rgn->end();
+    // use this region for the lower part
+    size_t exclude_size = rgn->end() - addr;
+    rgn->exclude_region(addr, exclude_size);
+
+    // higher part
+    address high_base = addr + size;
+    size_t  high_size = top - high_base;
+
+    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
+    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
+    assert(high_node == NULL || node->next() == high_node, "Should be right after");
+    return (high_node != NULL);
+  }
+
+  return false;
+}
+
+bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
+  // uncommit stack guard pages
+  if (flag() == mtThreadStack && !same_region(addr, sz)) {
+    return true;
+  }
+
+  assert(addr != NULL, "Invalid address");
+  assert(sz > 0, "Invalid size");
+
+  if (all_committed()) {
+    assert(_committed_regions.is_empty(), "Sanity check");
+    assert(contain_region(addr, sz), "Reserved region does not contain this region");
+    set_all_committed(false);
+    VirtualMemorySummary::record_uncommitted_memory(sz, flag());
+    if (same_region(addr, sz)) {
+      return true;
+    } else {
+      CommittedMemoryRegion rgn(base(), size(), *call_stack());
+      if (rgn.base() == addr || rgn.end() == (addr + sz)) {
+        rgn.exclude_region(addr, sz);
+        return add_committed_region(rgn);
+      } else {
+        // split this region
+        // top of the whole region
+        address top = rgn.end();
+        // use this region for the lower part
+        size_t exclude_size = rgn.end() - addr;
+        rgn.exclude_region(addr, exclude_size);
+        if (add_committed_region(rgn)) {
+          // higher part
+          address high_base = addr + sz;
+          size_t  high_size = top - high_base;
+          CommittedMemoryRegion high_rgn(high_base, high_size, emptyStack);
+          return add_committed_region(high_rgn);
+        } else {
+          return false;
+        }
+      }
+    }
+  } else {
+    // we have to walk the whole list to remove the committed regions
+    // in the specified range
+    LinkedListNode<CommittedMemoryRegion>* head =
+      _committed_regions.head();
+    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
+    VirtualMemoryRegion uncommitted_rgn(addr, sz);
+
+    while (head != NULL && !uncommitted_rgn.is_empty()) {
+      CommittedMemoryRegion* crgn = head->data();
+      // this committed region overlaps the region to uncommit
+      if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
+        if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
+          // found an exactly matching region; removing the node suffices
+          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
+          _committed_regions.remove_after(prev);
+          return true;
+        } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
+          // this committed region contains the whole region to uncommit
+          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
+          return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size());
+        } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) {
+          // this committed region falls entirely within the region to uncommit
+          size_t exclude_size = crgn->end() - uncommitted_rgn.base();
+          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size);
+          VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
+          head = head->next();
+          _committed_regions.remove_after(prev);
+          continue;
+        } else if (crgn->contain_address(uncommitted_rgn.base())) {
+          size_t toUncommitted = crgn->end() - uncommitted_rgn.base();
+          crgn->exclude_region(uncommitted_rgn.base(), toUncommitted);
+          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted);
+          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
+        } else if (uncommitted_rgn.contain_address(crgn->base())) {
+          size_t toUncommitted = uncommitted_rgn.end() - crgn->base();
+          crgn->exclude_region(crgn->base(), toUncommitted);
+          uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted,
+            toUncommitted);
+          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
+        }
+      }
+      prev = head;
+      head = head->next();
+    }
+  }
+
+  return true;
+}
+
+void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
+  assert(addr != NULL, "Invalid address");
+
+  // split committed regions
+  LinkedListNode<CommittedMemoryRegion>* head =
+    _committed_regions.head();
+  LinkedListNode<CommittedMemoryRegion>* prev = NULL;
+
+  while (head != NULL) {
+    if (head->data()->base() >= addr) {
+      break;
+    }
+    prev = head;
+    head = head->next();
+  }
+
+  if (head != NULL) {
+    if (prev != NULL) {
+      prev->set_next(head->next());
+    } else {
+      _committed_regions.set_head(NULL);
+    }
+  }
+
+  rgn._committed_regions.set_head(head);
+}
+
+size_t ReservedMemoryRegion::committed_size() const {
+  if (all_committed()) {
+    return size();
+  } else {
+    size_t committed = 0;
+    LinkedListNode<CommittedMemoryRegion>* head =
+      _committed_regions.head();
+    while (head != NULL) {
+      committed += head->data()->size();
+      head = head->next();
+    }
+    return committed;
+  }
+}
+
+void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
+  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
+  if (flag() != f) {
+    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
+    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
+    _flag = f;
+  }
+}
+
+bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
+  if (level >= NMT_summary) {
+    VirtualMemorySummary::initialize();
+  }
+  return true;
+}
+
+bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
+   const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
+  assert(base_addr != NULL, "Invalid address");
+  assert(size > 0, "Invalid size");
+
+  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
+  LinkedListNode<ReservedMemoryRegion>* node;
+  if (reserved_rgn == NULL) {
+    VirtualMemorySummary::record_reserved_memory(size, flag);
+    node = _reserved_regions.add(rgn);
+    if (node != NULL) {
+      node->data()->set_all_committed(all_committed);
+      return true;
+    } else {
+      return false;
+    }
+  } else {
+    if (reserved_rgn->same_region(base_addr, size)) {
+      reserved_rgn->set_call_stack(stack);
+      reserved_rgn->set_flag(flag);
+      return true;
+    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
+      VirtualMemorySummary::record_reserved_memory(size, flag);
+      reserved_rgn->expand_region(base_addr, size);
+      reserved_rgn->set_call_stack(stack);
+      return true;
+    } else {
+      // Overlapped reservation.
+      // This can happen when the regions are thread stacks: a JNI
+      // thread that does not detach from the VM before it exits
+      // leaks its JavaThread object.
+      if (reserved_rgn->flag() == mtThreadStack) {
+        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
+        // Overwrite with new region
+
+        // Release old region
+        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
+        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());
+
+        // Add new region
+        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);
+
+        *reserved_rgn = rgn;
+        return true;
+      } else {
+        ShouldNotReachHere();
+        return false;
+      }
+    }
+  }
+}
+
+void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
+  assert(addr != NULL, "Invalid address");
+
+  ReservedMemoryRegion   rgn(addr, 1);
+  ReservedMemoryRegion*  reserved_rgn = _reserved_regions.find(rgn);
+  if (reserved_rgn != NULL) {
+    assert(reserved_rgn->contain_address(addr), "Containment");
+    if (reserved_rgn->flag() != flag) {
+      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
+      reserved_rgn->set_flag(flag);
+    }
+  }
+}
+
+bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
+  const NativeCallStack& stack) {
+  assert(addr != NULL, "Invalid address");
+  assert(size > 0, "Invalid size");
+  ReservedMemoryRegion  rgn(addr, size);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
+
+  assert(reserved_rgn != NULL, "No reserved region");
+  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
+  return reserved_rgn->add_committed_region(addr, size, stack);
+}
+
+bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
+  assert(addr != NULL, "Invalid address");
+  assert(size > 0, "Invalid size");
+  ReservedMemoryRegion  rgn(addr, size);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
+  assert(reserved_rgn != NULL, "No reserved region");
+  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
+  return reserved_rgn->remove_uncommitted_region(addr, size);
+}
+
+bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
+  assert(addr != NULL, "Invalid address");
+  assert(size > 0, "Invalid size");
+
+  ReservedMemoryRegion  rgn(addr, size);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
+
+  assert(reserved_rgn != NULL, "No reserved region");
+
+  // uncommit regions within the released region
+  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
+    return false;
+  }
+
+
+  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());
+
+  if (reserved_rgn->same_region(addr, size)) {
+    return _reserved_regions.remove(rgn);
+  } else {
+    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
+    if (reserved_rgn->base() == addr ||
+        reserved_rgn->end() == addr + size) {
+      reserved_rgn->exclude_region(addr, size);
+      return true;
+    } else {
+      address top = reserved_rgn->end();
+      address high_base = addr + size;
+      ReservedMemoryRegion high_rgn(high_base, top - high_base,
+        *reserved_rgn->call_stack(), reserved_rgn->flag());
+
+      // use original region for lower region
+      reserved_rgn->exclude_region(addr, top - addr);
+      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions.add(high_rgn);
+      if (new_rgn == NULL) {
+        return false;
+      } else {
+        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
+        return true;
+      }
+    }
+  }
+}
+
+
+bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
+  ThreadCritical tc;
+  LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions.head();
+  while (head != NULL) {
+    const ReservedMemoryRegion* rgn = head->peek();
+    if (!walker->do_allocation_site(rgn)) {
+      return false;
+    }
+    head = head->next();
+  }
+  return true;
+}
+
+// Transition virtual memory tracking level.
+bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
+  if (from == NMT_minimal) {
+    assert(to == NMT_summary || to == NMT_detail, "Just check");
+    VirtualMemorySummary::reset();
+  } else if (to == NMT_minimal) {
+    assert(from == NMT_summary || from == NMT_detail, "Just check");
+    // Clean up virtual memory tracking data structures.
+    ThreadCritical tc;
+    _reserved_regions.clear();
+  }
+
+  return true;
+}
+
+
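
The split arithmetic in remove_released_region() above can be checked with a
small standalone C++ sketch; the constants are hypothetical, and only the
address math mirrors the code:

    #include <cassert>
    #include <cstddef>

    typedef unsigned char* address;

    int main() {
      // Reserved region [0x1000, 0x1400) with [0x1100, 0x1300) released
      // from its middle.
      address base = reinterpret_cast<address>(0x1000);
      size_t  size = 0x400;
      address addr = base + 0x100;   // start of the released chunk
      size_t  sz   = 0x200;          // size of the released chunk

      address top       = base + size;   // end of the reserved region
      address high_base = addr + sz;     // base of the new high region
      size_t  high_size = top - high_base;

      // The original region keeps the low part: exclude [addr, top).
      size_t low_size = size - (top - addr);

      assert(high_base == reinterpret_cast<address>(0x1300));
      assert(high_size == 0x100);
      assert(low_size  == 0x100);
      return 0;
    }
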
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/virtualMemoryTracker.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,437 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
+#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
+
+#if INCLUDE_NMT
+
+#include "memory/allocation.hpp"
+#include "services/allocationSite.hpp"
+#include "services/nmtCommon.hpp"
+#include "utilities/linkedlist.hpp"
+#include "utilities/nativeCallStack.hpp"
+#include "utilities/ostream.hpp"
+
+
+/*
+ * Virtual memory counter
+ */
+class VirtualMemory VALUE_OBJ_CLASS_SPEC {
+ private:
+  size_t     _reserved;
+  size_t     _committed;
+
+ public:
+  VirtualMemory() : _reserved(0), _committed(0) { }
+
+  inline void reserve_memory(size_t sz) { _reserved += sz; }
+  inline void commit_memory (size_t sz) {
+    _committed += sz;
+    assert(_committed <= _reserved, "Sanity check");
+  }
+
+  inline void release_memory (size_t sz) {
+    assert(_reserved >= sz, "Negative amount");
+    _reserved -= sz;
+  }
+
+  inline void uncommit_memory(size_t sz) {
+    assert(_committed >= sz, "Negative amount");
+    _committed -= sz;
+  }
+
+  void reset() {
+    _reserved  = 0;
+    _committed = 0;
+  }
+
+  inline size_t reserved()  const { return _reserved;  }
+  inline size_t committed() const { return _committed; }
+};
+
+// Virtual memory allocation site; keeps track of where the virtual memory was reserved.
+class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
+ public:
+  VirtualMemoryAllocationSite(const NativeCallStack& stack) :
+    AllocationSite<VirtualMemory>(stack) { }
+
+  inline void reserve_memory(size_t sz)  { data()->reserve_memory(sz);  }
+  inline void commit_memory (size_t sz)  { data()->commit_memory(sz);   }
+  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
+  inline void release_memory(size_t sz)  { data()->release_memory(sz);  }
+  inline size_t reserved() const  { return peek()->reserved(); }
+  inline size_t committed() const { return peek()->committed(); }
+};
+
+class VirtualMemorySummary;
+
+// This class represents a snapshot of virtual memory at a given time.
+// The latest snapshot is saved in a static area.
+class VirtualMemorySnapshot : public ResourceObj {
+  friend class VirtualMemorySummary;
+
+ private:
+  VirtualMemory  _virtual_memory[mt_number_of_types];
+
+ public:
+  inline VirtualMemory* by_type(MEMFLAGS flag) {
+    int index = NMTUtil::flag_to_index(flag);
+    return &_virtual_memory[index];
+  }
+
+  inline VirtualMemory* by_index(int index) {
+    assert(index >= 0, "Index out of bound");
+    assert(index < mt_number_of_types, "Index out of bound");
+    return &_virtual_memory[index];
+  }
+
+  inline size_t total_reserved() const {
+    size_t amount = 0;
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      amount += _virtual_memory[index].reserved();
+    }
+    return amount;
+  }
+
+  inline size_t total_committed() const {
+    size_t amount = 0;
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      amount += _virtual_memory[index].committed();
+    }
+    return amount;
+  }
+
+  inline void reset() {
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      _virtual_memory[index].reset();
+    }
+  }
+
+  void copy_to(VirtualMemorySnapshot* s) {
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      s->_virtual_memory[index] = _virtual_memory[index];
+    }
+  }
+};
+
+class VirtualMemorySummary : AllStatic {
+ public:
+  static void initialize();
+
+  static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->reserve_memory(size);
+  }
+
+  static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->commit_memory(size);
+  }
+
+  static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->uncommit_memory(size);
+  }
+
+  static inline void record_released_memory(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->release_memory(size);
+  }
+
+  // Move virtual memory from one memory type to another.
+  // Virtual memory can be reserved before it is associated with a memory type, in which
+  // case it is tagged as 'unknown'. Once the memory is tagged, it is moved from the
+  // 'unknown' type to the specified memory type.
+  static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
+    as_snapshot()->by_type(from)->release_memory(size);
+    as_snapshot()->by_type(to)->reserve_memory(size);
+  }
+
+  static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
+    as_snapshot()->by_type(from)->uncommit_memory(size);
+    as_snapshot()->by_type(to)->commit_memory(size);
+  }
+
+  static inline void snapshot(VirtualMemorySnapshot* s) {
+    as_snapshot()->copy_to(s);
+  }
+
+  static inline void reset() {
+    as_snapshot()->reset();
+  }
+
+  static VirtualMemorySnapshot* as_snapshot() {
+    return (VirtualMemorySnapshot*)_snapshot;
+  }
+
+ private:
+  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
+};
+
+
+
+/*
+ * A virtual memory region
+ */
+class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
+ private:
+  address      _base_address;
+  size_t       _size;
+
+ public:
+  VirtualMemoryRegion(address addr, size_t size) :
+    _base_address(addr), _size(size) {
+     assert(addr != NULL, "Invalid address");
+     assert(size > 0, "Invalid size");
+   }
+
+  inline address base() const { return _base_address;   }
+  inline address end()  const { return base() + size(); }
+  inline size_t  size() const { return _size;           }
+
+  inline bool is_empty() const { return size() == 0; }
+
+  inline bool contain_address(address addr) const {
+    return (addr >= base() && addr < end());
+  }
+
+
+  inline bool contain_region(address addr, size_t size) const {
+    return contain_address(addr) && contain_address(addr + size - 1);
+  }
+
+  inline bool same_region(address addr, size_t sz) const {
+    return (addr == base() && sz == size());
+  }
+
+
+  inline bool overlap_region(address addr, size_t sz) const {
+    VirtualMemoryRegion rgn(addr, sz);
+    return contain_address(addr) ||
+           contain_address(addr + sz - 1) ||
+           rgn.contain_address(base()) ||
+           rgn.contain_address(end() - 1);
+  }
+
+  inline bool adjacent_to(address addr, size_t sz) const {
+    return (addr == end() || (addr + sz) == base());
+  }
+
+  void exclude_region(address addr, size_t sz) {
+    assert(contain_region(addr, sz), "Not containment");
+    assert(addr == base() || addr + sz == end(), "Can not exclude from middle");
+    size_t new_size = size() - sz;
+
+    if (addr == base()) {
+      set_base(addr + sz);
+    }
+    set_size(new_size);
+  }
+
+  void expand_region(address addr, size_t sz) {
+    assert(adjacent_to(addr, sz), "Not adjacent regions");
+    if (base() == addr + sz) {
+      set_base(addr);
+    }
+    set_size(size() + sz);
+  }
+
+ protected:
+  void set_base(address base) {
+    assert(base != NULL, "Sanity check");
+    _base_address = base;
+  }
+
+  void set_size(size_t  size) {
+    assert(size > 0, "Sanity check");
+    _size = size;
+  }
+};
+
+
+class CommittedMemoryRegion : public VirtualMemoryRegion {
+ private:
+  NativeCallStack  _stack;
+
+ public:
+  CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
+    VirtualMemoryRegion(addr, size), _stack(stack) { }
+
+  inline int compare(const CommittedMemoryRegion& rgn) const {
+    if (overlap_region(rgn.base(), rgn.size()) ||
+        adjacent_to   (rgn.base(), rgn.size())) {
+      return 0;
+    } else {
+      if (base() == rgn.base()) {
+        return 0;
+      } else if (base() > rgn.base()) {
+        return 1;
+      } else {
+        return -1;
+      }
+    }
+  }
+
+  inline bool equals(const CommittedMemoryRegion& rgn) const {
+    return compare(rgn) == 0;
+  }
+
+  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
+  inline const NativeCallStack* call_stack() const         { return &_stack; }
+};
+
+
+typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;
+
+int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);
+class ReservedMemoryRegion : public VirtualMemoryRegion {
+ private:
+  SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
+    _committed_regions;
+
+  NativeCallStack  _stack;
+  MEMFLAGS         _flag;
+
+  bool             _all_committed;
+
+ public:
+  ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
+    MEMFLAGS flag = mtNone) :
+    VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
+    _all_committed(false) { }
+
+
+  ReservedMemoryRegion(address base, size_t size) :
+    VirtualMemoryRegion(base, size), _stack(emptyStack), _flag(mtNone),
+    _all_committed(false) { }
+
+  // Copy constructor
+  ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
+    VirtualMemoryRegion(rr.base(), rr.size()) {
+    *this = rr;
+  }
+
+  inline void  set_call_stack(const NativeCallStack& stack) { _stack = stack; }
+  inline const NativeCallStack* call_stack() const          { return &_stack;  }
+
+  void  set_flag(MEMFLAGS flag);
+  inline MEMFLAGS flag() const            { return _flag;  }
+
+  inline int compare(const ReservedMemoryRegion& rgn) const {
+    if (overlap_region(rgn.base(), rgn.size())) {
+      return 0;
+    } else {
+      if (base() == rgn.base()) {
+        return 0;
+      } else if (base() > rgn.base()) {
+        return 1;
+      } else {
+        return -1;
+      }
+    }
+  }
+
+  inline bool equals(const ReservedMemoryRegion& rgn) const {
+    return compare(rgn) == 0;
+  }
+
+  bool    add_committed_region(address addr, size_t size, const NativeCallStack& stack);
+  bool    remove_uncommitted_region(address addr, size_t size);
+
+  size_t  committed_size() const;
+
+  // Move committed regions that are higher than the specified address to
+  // the new region
+  void    move_committed_regions(address addr, ReservedMemoryRegion& rgn);
+
+  inline bool all_committed() const { return _all_committed; }
+  void        set_all_committed(bool b);
+
+  CommittedRegionIterator iterate_committed_regions() const {
+    return CommittedRegionIterator(_committed_regions.head());
+  }
+
+  ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
+    set_base(other.base());
+    set_size(other.size());
+
+    _stack =         *other.call_stack();
+    _flag  =         other.flag();
+    _all_committed = other.all_committed();
+    if (other.all_committed()) {
+      set_all_committed(true);
+    } else {
+      CommittedRegionIterator itr = other.iterate_committed_regions();
+      const CommittedMemoryRegion* rgn = itr.next();
+      while (rgn != NULL) {
+        _committed_regions.add(*rgn);
+        rgn = itr.next();
+      }
+    }
+    return *this;
+  }
+
+ private:
+  // The committed region contains the uncommitted region; subtract the uncommitted
+  // region from this committed region
+  bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
+    address addr, size_t sz);
+
+  bool add_committed_region(const CommittedMemoryRegion& rgn) {
+    assert(rgn.base() != NULL, "Invalid base address");
+    assert(size() > 0, "Invalid size");
+    return _committed_regions.add(rgn) != NULL;
+  }
+};
+
+int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);
+
+class VirtualMemoryWalker : public StackObj {
+ public:
+   virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
+};
+
+// Main class called from MemTracker to track virtual memory allocations, commits and releases.
+class VirtualMemoryTracker : AllStatic {
+ public:
+  static bool initialize(NMT_TrackingLevel level);
+
+  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
+    MEMFLAGS flag = mtNone, bool all_committed = false);
+
+  static bool add_committed_region      (address base_addr, size_t size, const NativeCallStack& stack);
+  static bool remove_uncommitted_region (address base_addr, size_t size);
+  static bool remove_released_region    (address base_addr, size_t size);
+  static void set_reserved_region_type  (address addr, MEMFLAGS flag);
+
+  // Walk the virtual memory data structures, e.g. to create a baseline.
+  static bool walk_virtual_memory(VirtualMemoryWalker* walker);
+
+  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
+
+ private:
+  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> _reserved_regions;
+};
+
+
+#endif // INCLUDE_NMT
+
+#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
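
A minimal usage sketch of the tracker API above; in HotSpot these calls are
normally made through MemTracker, so the helper function and addresses here
are hypothetical, and NMT is assumed to be enabled at summary level or higher:

    // Hypothetical helper; real callers go through MemTracker.
    void track_mapping_sketch(address base, size_t size,
                              const NativeCallStack& stack) {
      // Reserve a region tagged mtInternal, commit its low half,
      // uncommit a quarter of that, then release the whole region.
      VirtualMemoryTracker::add_reserved_region(base, size, stack, mtInternal);
      VirtualMemoryTracker::add_committed_region(base, size / 2, stack);
      VirtualMemoryTracker::remove_uncommitted_region(base, size / 4);
      VirtualMemoryTracker::remove_released_region(base, size);
    }
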
--- a/hotspot/src/share/vm/shark/sharkBuilder.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/shark/sharkBuilder.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -413,7 +413,7 @@
   const char *name;
   if (value->hasName())
     // XXX this leaks, but it's only debug code
-    name = strdup(value->getName().str().c_str());
+    name = os::strdup(value->getName().str().c_str());
   else
     name = "unnamed_value";
 
--- a/hotspot/src/share/vm/utilities/bitMap.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/utilities/bitMap.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -567,7 +567,7 @@
   }
 
   static void testResizeNonResource() {
-    const uintx bitmap_bytes = BITMAP_SIZE / BitsPerByte;
+    const size_t bitmap_bytes = BITMAP_SIZE / BitsPerByte;
 
     // Test the default behavior
     testResize(false);
@@ -575,13 +575,13 @@
     {
       // Make sure that AllocatorMallocLimit is larger than our allocation request
       // forcing it to call standard malloc()
-      UIntFlagSetting fs(ArrayAllocatorMallocLimit, bitmap_bytes * 4);
+      SizeTFlagSetting fs(ArrayAllocatorMallocLimit, bitmap_bytes * 4);
       testResize(false);
     }
     {
       // Make sure that AllocatorMallocLimit is smaller than our allocation request
       // forcing it to call mmap() (or equivalent)
-      UIntFlagSetting fs(ArrayAllocatorMallocLimit, bitmap_bytes / 4);
+      SizeTFlagSetting fs(ArrayAllocatorMallocLimit, bitmap_bytes / 4);
       testResize(false);
     }
   }
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -558,6 +558,27 @@
   return fabs(value);
 }
 
+//----------------------------------------------------------------------------------------------------
+// Special casts
+// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
+typedef union {
+  jfloat f;
+  jint i;
+} FloatIntConv;
+
+typedef union {
+  jdouble d;
+  jlong l;
+  julong ul;
+} DoubleLongConv;
+
+inline jint    jint_cast    (jfloat  x)  { return ((FloatIntConv*)&x)->i; }
+inline jfloat  jfloat_cast  (jint    x)  { return ((FloatIntConv*)&x)->f; }
+
+inline jlong   jlong_cast   (jdouble x)  { return ((DoubleLongConv*)&x)->l;  }
+inline julong  julong_cast  (jdouble x)  { return ((DoubleLongConv*)&x)->ul; }
+inline jdouble jdouble_cast (jlong   x)  { return ((DoubleLongConv*)&x)->d;  }
+
 inline jint low (jlong value)                    { return jint(value); }
 inline jint high(jlong value)                    { return jint(value >> 32); }
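
The motivation for the union-based casts added above: dereferencing a
type-punned pointer, as the per-compiler *(jint*)&x versions removed below
did, violates strict aliasing. A standalone sketch with standard types (the
names are illustrative, not HotSpot code); reading the inactive union member
is compiler-supported rather than strictly standard C++, so memcpy is shown
as the fully portable alternative:

    #include <cstdio>
    #include <cstring>

    union FloatIntDemo { float f; int i; };  // mirrors FloatIntConv

    int bits_via_union(float x)  { FloatIntDemo c; c.f = x; return c.i; }

    int bits_via_memcpy(float x) {           // fully portable alternative
      int i;
      std::memcpy(&i, &x, sizeof(i));
      return i;
    }

    int main() {
      // Both print 0x3f800000 for 1.0f on IEEE-754 platforms.
      std::printf("0x%08x 0x%08x\n",
                  bits_via_union(1.0f), bits_via_memcpy(1.0f));
      return 0;
    }
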
 
--- a/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -167,17 +167,6 @@
 typedef uint32_t juint;
 typedef uint64_t julong;
 
-//----------------------------------------------------------------------------------------------------
-// Special (possibly not-portable) casts
-// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
-// %%%%%% These seem like standard C++ to me--how about factoring them out? - Ungar
-
-inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
-inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
-inline julong  julong_cast (jdouble x)           { return *(julong* )&x; }
-
-inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
-inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
 
 //----------------------------------------------------------------------------------------------------
 // Constant for jlong (specifying an long long canstant is C++ compiler specific)
--- a/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -183,15 +183,6 @@
 typedef unsigned int       juint;
 typedef unsigned long long julong;
 
-//----------------------------------------------------------------------------------------------------
-// Special (possibly not-portable) casts
-// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
-
-inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
-inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
-
-inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
-inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
 
 //----------------------------------------------------------------------------------------------------
 // Constant for jlong (specifying an long long constant is C++ compiler specific)
--- a/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -116,16 +116,6 @@
 typedef unsigned int     juint;
 typedef unsigned __int64 julong;
 
-//----------------------------------------------------------------------------------------------------
-// Special (possibly not-portable) casts
-// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
-
-inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
-inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
-
-inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
-inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
-
 
 //----------------------------------------------------------------------------------------------------
 // Non-standard stdlib-like stuff:
--- a/hotspot/src/share/vm/utilities/globalDefinitions_xlc.hpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_xlc.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -114,16 +114,6 @@
 typedef uint32_t juint;
 typedef uint64_t julong;
 
-//----------------------------------------------------------------------------------------------------
-// Special (possibly not-portable) casts
-// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
-// %%%%%% These seem like standard C++ to me--how about factoring them out? - Ungar
-
-inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
-inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
-
-inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
-inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
 
 //----------------------------------------------------------------------------------------------------
 // Constant for jlong (specifying an long long canstant is C++ compiler specific)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/linkedlist.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#include "runtime/os.hpp"
+#include "utilities/linkedlist.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+
+class Integer : public StackObj {
+ private:
+  int  _value;
+ public:
+  Integer(int i) : _value(i) { }
+
+  int   value() const { return _value; }
+  bool  equals(const Integer& i) const {
+   return _value == i.value();
+  }
+};
+
+int compare_Integer(const Integer& i1, const Integer& i2) {
+  return i1.value() - i2.value();
+}
+
+void check_list_values(const int* expected, const LinkedList<Integer>* list) {
+  LinkedListNode<Integer>* head = list->head();
+  int index = 0;
+  while (head != NULL) {
+    assert(head->peek()->value() == expected[index], "Unexpected value");
+    head = head->next();
+    index ++;
+  }
+}
+
+void Test_linked_list() {
+  LinkedListImpl<Integer, ResourceObj::C_HEAP, mtTest>  ll;
+
+
+  // Test regular linked list
+  assert(ll.is_empty(), "Start with empty list");
+  Integer one(1), two(2), three(3), four(4), five(5), six(6);
+
+  ll.add(six);
+  assert(!ll.is_empty(), "Should not be empty");
+
+  Integer* i = ll.find(six);
+  assert(i != NULL, "Should find it");
+
+  i = ll.find(three);
+  assert(i == NULL, "Not in the list");
+
+  LinkedListNode<Integer>* node = ll.find_node(six);
+  assert(node != NULL, "6 is in the list");
+
+  ll.insert_after(three, node);
+  ll.insert_before(one, node);
+  int expected[3] = {1, 6, 3};
+  check_list_values(expected, &ll);
+
+  ll.add(two);
+  ll.add(four);
+  ll.add(five);
+
+  // Test sorted linked list
+  SortedLinkedList<Integer, compare_Integer, ResourceObj::C_HEAP, mtTest> sl;
+  assert(sl.is_empty(), "Start with empty list");
+
+  size_t ll_size = ll.size();
+  sl.move(&ll);
+  size_t sl_size = sl.size();
+
+  assert(ll_size == sl_size, "Should be the same size");
+  assert(ll.is_empty(), "No more entries");
+
+  // sorted result
+  int sorted_result[] = {1, 2, 3, 4, 5, 6};
+  check_list_values(sorted_result, &sl);
+
+  node = sl.find_node(four);
+  assert(node != NULL, "4 is in the list");
+  sl.remove_before(node);
+  sl.remove_after(node);
+  int remains[] = {1, 2, 4, 6};
+  check_list_values(remains, &sl);
+}
+#endif // PRODUCT
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/linkedlist.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_LINKED_LIST_HPP
+#define SHARE_VM_UTILITIES_LINKED_LIST_HPP
+
+#include "memory/allocation.hpp"
+
+/*
+ * A generic linked list implementation that can use various kinds of
+ * backing storage, such as C heap, arena, and resource area.
+ */
+
+
+// An entry in a linked list. It should use the same backing storage
+// as the linked list that contains this entry.
+template <class E> class LinkedListNode : public ResourceObj {
+ private:
+  E                       _data;  // embedded content
+  LinkedListNode<E>*      _next;  // next entry
+
+ protected:
+  LinkedListNode() : _next(NULL) { }
+
+ public:
+  LinkedListNode(const E& e): _data(e), _next(NULL) { }
+
+  inline void set_next(LinkedListNode<E>* node) { _next = node; }
+  inline LinkedListNode<E> * next() const       { return _next; }
+
+  E*  data() { return &_data; }
+  const E* peek() const { return &_data; }
+};
+
+// A linked list interface. It does not specify
+// any storage type it uses, so all methods involving
+// memory allocation or deallocation are pure virtual
+template <class E> class LinkedList : public ResourceObj {
+ protected:
+  LinkedListNode<E>*    _head;
+
+ public:
+  LinkedList() : _head(NULL) { }
+
+  inline void set_head(LinkedListNode<E>* h) { _head = h; }
+  inline LinkedListNode<E>* head() const     { return _head; }
+  inline bool is_empty()           const     { return head() == NULL; }
+
+  inline size_t size() const {
+    LinkedListNode<E>* p;
+    size_t count = 0;
+    for (p = head(); p != NULL; count++, p = p->next());
+    return count;
+ }
+
+  // Move all entries from specified linked list to this one
+  virtual void move(LinkedList<E>* list) = 0;
+
+  // Add an entry to this linked list
+  virtual LinkedListNode<E>* add(const E& e) = 0;
+  // Add a pre-allocated node to this linked list
+  virtual void add(LinkedListNode<E>* node) = 0;
+
+  // Add a linked list to this linked list
+  virtual bool  add(const LinkedList<E>* list) = 0;
+
+  // Search entry in the linked list
+  virtual LinkedListNode<E>* find_node(const E& e) = 0;
+  virtual E* find(const E& e) = 0;
+
+  // Insert entry to the linked list
+  virtual LinkedListNode<E>* insert_before(const E& e, LinkedListNode<E>* ref) = 0;
+  virtual LinkedListNode<E>* insert_after (const E& e, LinkedListNode<E>* ref) = 0;
+
+  // Remove entry from the linked list
+  virtual bool               remove(const E& e) = 0;
+  virtual bool               remove(LinkedListNode<E>* node) = 0;
+  virtual bool               remove_before(LinkedListNode<E>* ref) = 0;
+  virtual bool               remove_after(LinkedListNode<E>*  ref) = 0;
+
+  LinkedListNode<E>* unlink_head() {
+    LinkedListNode<E>* h = this->head();
+    if (h != NULL) {
+      this->set_head(h->next());
+    }
+    return h;
+  }
+
+  DEBUG_ONLY(virtual ResourceObj::allocation_type storage_type() = 0;)
+};
+
+// A linked list implementation.
+// The linked list can be allocated in various types of memory: C heap, arena, resource area, etc.
+template <class E, ResourceObj::allocation_type T = ResourceObj::C_HEAP,
+  MEMFLAGS F = mtNMT, AllocFailType alloc_failmode = AllocFailStrategy::RETURN_NULL>
+  class LinkedListImpl : public LinkedList<E> {
+ protected:
+  Arena*                 _arena;
+ public:
+  LinkedListImpl() :  _arena(NULL) { }
+  LinkedListImpl(Arena* a) : _arena(a) { }
+
+  virtual ~LinkedListImpl() {
+    clear();
+  }
+
+  virtual void clear() {
+    LinkedListNode<E>* p = this->head();
+    this->set_head(NULL);
+    while (p != NULL) {
+      LinkedListNode<E>* to_delete = p;
+      p = p->next();
+      delete_node(to_delete);
+    }
+  }
+
+  // Add an entry to the linked list
+  virtual LinkedListNode<E>* add(const E& e)  {
+    LinkedListNode<E>* node = this->new_node(e);
+    if (node != NULL) {
+      this->add(node);
+    }
+
+    return node;
+  }
+
+  virtual void add(LinkedListNode<E>* node) {
+    assert(node != NULL, "NULL pointer");
+    node->set_next(this->head());
+    this->set_head(node);
+  }
+
+  // Move a linked list to this linked list; both must be allocated with the same
+  // storage type.
+  virtual void move(LinkedList<E>* list) {
+    assert(list->storage_type() == this->storage_type(), "Different storage type");
+    LinkedListNode<E>* node = this->head();
+    while (node != NULL && node->next() != NULL) {
+      node = node->next();
+    }
+    if (node == NULL) {
+      this->set_head(list->head());
+    } else {
+      node->set_next(list->head());
+    }
+    // All entries are moved
+    list->set_head(NULL);
+  }
+
+  virtual bool add(const LinkedList<E>* list) {
+    LinkedListNode<E>* node = list->head();
+    while (node != NULL) {
+      if (this->add(*node->peek()) == NULL) {
+        return false;
+      }
+      node = node->next();
+    }
+    return true;
+  }
+
+
+  virtual LinkedListNode<E>* find_node(const E& e) {
+    LinkedListNode<E>* p = this->head();
+    while (p != NULL && !p->peek()->equals(e)) {
+      p = p->next();
+    }
+    return p;
+  }
+
+  E* find(const E& e) {
+    LinkedListNode<E>* node = find_node(e);
+    return (node == NULL) ? NULL : node->data();
+  }
+
+
+  // Add an entry in front of the reference entry
+  LinkedListNode<E>* insert_before(const E& e, LinkedListNode<E>* ref_node) {
+    LinkedListNode<E>* node = this->new_node(e);
+    if (node == NULL) return NULL;
+    if (ref_node == this->head()) {
+      node->set_next(ref_node);
+      this->set_head(node);
+    } else {
+      LinkedListNode<E>* p = this->head();
+      while (p != NULL && p->next() != ref_node) {
+        p = p->next();
+      }
+      assert(p != NULL, "ref_node not in the list");
+      node->set_next(ref_node);
+      p->set_next(node);
+    }
+    return node;
+  }
+
+   // Add an entry behind the reference entry
+   LinkedListNode<E>* insert_after(const E& e, LinkedListNode<E>* ref_node) {
+     LinkedListNode<E>* node = this->new_node(e);
+     if (node == NULL) return NULL;
+     node->set_next(ref_node->next());
+     ref_node->set_next(node);
+     return node;
+   }
+
+   // Remove an entry from the linked list.
+   // Return true if the entry is successfully removed
+   virtual bool remove(const E& e) {
+     LinkedListNode<E>* tmp = this->head();
+     LinkedListNode<E>* prev = NULL;
+
+     while (tmp != NULL) {
+       if (tmp->peek()->equals(e)) {
+         return remove_after(prev);
+       }
+       prev = tmp;
+       tmp = tmp->next();
+     }
+     return false;
+  }
+
+  // Remove the node after the reference entry
+  virtual bool remove_after(LinkedListNode<E>* prev) {
+    LinkedListNode<E>* to_delete;
+    if (prev == NULL) {
+      to_delete = this->unlink_head();
+    } else {
+      to_delete = prev->next();
+      if (to_delete != NULL) {
+        prev->set_next(to_delete->next());
+      }
+    }
+
+    if (to_delete != NULL) {
+      delete_node(to_delete);
+      return true;
+    }
+    return false;
+  }
+
+  virtual bool remove(LinkedListNode<E>* node) {
+    LinkedListNode<E>* p = this->head();
+    while (p != NULL && p->next() != node) {
+      p = p->next();
+    }
+    if (p != NULL) {
+      p->set_next(node->next());
+      delete_node(node);
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  virtual bool remove_before(LinkedListNode<E>* ref) {
+    assert(ref != NULL, "NULL pointer");
+    LinkedListNode<E>* p = this->head();
+    LinkedListNode<E>* to_delete = NULL; // to be deleted
+    LinkedListNode<E>* prev = NULL;      // node before the node to be deleted
+    while (p != NULL && p != ref) {
+      prev = to_delete;
+      to_delete = p;
+      p = p->next();
+    }
+    if (p == NULL || to_delete == NULL) return false;
+    assert(to_delete->next() == ref, "Wrong node to delete");
+    assert(prev == NULL || prev->next() == to_delete,
+      "Sanity check");
+    if (prev == NULL) {
+      assert(to_delete == this->head(), "Must be head");
+      this->set_head(to_delete->next());
+    } else {
+      prev->set_next(to_delete->next());
+    }
+    delete_node(to_delete);
+    return true;
+  }
+
+  DEBUG_ONLY(ResourceObj::allocation_type storage_type() { return T; })
+ protected:
+  // Create new linked list node object in specified storage
+  LinkedListNode<E>* new_node(const E& e) const {
+     switch(T) {
+       case ResourceObj::ARENA: {
+         assert(_arena != NULL, "Arena not set");
+         return new(_arena) LinkedListNode<E>(e);
+       }
+       case ResourceObj::RESOURCE_AREA:
+       case ResourceObj::C_HEAP: {
+         if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
+           return new(std::nothrow, T, F) LinkedListNode<E>(e);
+         } else {
+           return new(T, F) LinkedListNode<E>(e);
+         }
+       }
+       default:
+         ShouldNotReachHere();
+     }
+     return NULL;
+  }
+
+  // Delete linked list node object
+  void delete_node(LinkedListNode<E>* node) {
+    if (T == ResourceObj::C_HEAP) {
+      delete node;
+    }
+  }
+};
+
+// Sorted linked list. The linked list maintains the sort order defined by the
+// comparison function.
+template <class E, int (*FUNC)(const E&, const E&),
+  ResourceObj::allocation_type T = ResourceObj::C_HEAP,
+  MEMFLAGS F = mtNMT, AllocFailType alloc_failmode = AllocFailStrategy::RETURN_NULL>
+  class SortedLinkedList : public LinkedListImpl<E, T, F, alloc_failmode> {
+ public:
+  SortedLinkedList() { }
+  SortedLinkedList(Arena* a) : LinkedListImpl<E, T, F, alloc_failmode>(a) { }
+
+  virtual LinkedListNode<E>* add(const E& e) {
+    return LinkedListImpl<E, T, F, alloc_failmode>::add(e);
+  }
+
+  virtual void move(LinkedList<E>* list) {
+    assert(list->storage_type() == this->storage_type(), "Different storage type");
+    LinkedListNode<E>* node;
+    while ((node = list->unlink_head()) != NULL) {
+      this->add(node);
+    }
+    assert(list->is_empty(), "All entries are moved");
+  }
+
+  virtual void add(LinkedListNode<E>* node) {
+    assert(node != NULL, "NULL pointer");
+    LinkedListNode<E>* tmp = this->head();
+    LinkedListNode<E>* prev = NULL;
+
+    int cmp_val;
+    while (tmp != NULL) {
+      cmp_val = FUNC(*tmp->peek(), *node->peek());
+      if (cmp_val >= 0) {
+        break;
+      }
+      prev = tmp;
+      tmp = tmp->next();
+    }
+
+    if (prev != NULL) {
+      node->set_next(prev->next());
+      prev->set_next(node);
+    } else {
+      node->set_next(this->head());
+      this->set_head(node);
+    }
+  }
+
+  virtual bool add(const LinkedList<E>* list) {
+    return LinkedListImpl<E, T, F, alloc_failmode>::add(list);
+  }
+
+  virtual LinkedListNode<E>* find_node(const E& e) {
+    LinkedListNode<E>* p = this->head();
+
+    while (p != NULL) {
+      int comp_val = FUNC(*p->peek(), e);
+      if (comp_val == 0) {
+        return p;
+      } else if (comp_val > 0) {
+        return NULL;
+      }
+      p = p->next();
+    }
+    return NULL;
+  }
+};
+
+// Iterates over all entries in the list
+template <class E> class LinkedListIterator : public StackObj {
+ private:
+  LinkedListNode<E>* _p;
+  bool               _is_empty;
+ public:
+  LinkedListIterator(LinkedListNode<E>* head) : _p(head) {
+    _is_empty = (head == NULL);
+  }
+
+  bool is_empty() const { return _is_empty; }
+
+  const E* next() {
+    if (_p == NULL) return NULL;
+    const E* e = _p->peek();
+    _p = _p->next();
+    return e;
+  }
+};
+
+#endif
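
A standalone sketch (not HotSpot code) of the insertion walk that
SortedLinkedList::add above performs: advance while the comparison function
reports the existing element is smaller, then splice the new node in front of
the first element that compares greater than or equal to it:

    #include <cassert>
    #include <cstddef>

    struct Node { int value; Node* next; };

    static int compare_int(int a, int b) { return a - b; }

    Node* sorted_insert(Node* head, Node* node) {
      Node* prev = NULL;
      Node* cur  = head;
      while (cur != NULL && compare_int(cur->value, node->value) < 0) {
        prev = cur;
        cur  = cur->next;
      }
      node->next = cur;
      if (prev != NULL) { prev->next = node; return head; }
      return node;  // new head
    }

    int main() {
      Node n3 = {3, NULL}, n1 = {1, NULL}, n2 = {2, NULL};
      Node* head = NULL;
      head = sorted_insert(head, &n3);
      head = sorted_insert(head, &n1);
      head = sorted_insert(head, &n2);
      assert(head->value == 1 &&
             head->next->value == 2 &&
             head->next->next->value == 3);
      return 0;
    }
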
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/nativeCallStack.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/os.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/nativeCallStack.hpp"
+
+
+NativeCallStack::NativeCallStack(int toSkip, bool fillStack) :
+  _hash_value(0) {
+
+#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+  fillStack = false;
+#endif
+
+  if (fillStack) {
+    os::get_native_stack(_stack, NMT_TrackingStackDepth, toSkip);
+  } else {
+    for (int index = 0; index < NMT_TrackingStackDepth; index ++) {
+      _stack[index] = NULL;
+    }
+  }
+}
+
+NativeCallStack::NativeCallStack(address* pc, int frameCount) {
+  int frameToCopy = (frameCount < NMT_TrackingStackDepth) ?
+    frameCount : NMT_TrackingStackDepth;
+  int index;
+  for (index = 0; index < frameToCopy; index ++) {
+    _stack[index] = pc[index];
+  }
+  for (; index < NMT_TrackingStackDepth; index ++) {
+    _stack[index] = NULL;
+  }
+}
+
+// number of stack frames captured
+int NativeCallStack::frames() const {
+  int index;
+  for (index = 0; index < NMT_TrackingStackDepth; index ++) {
+    if (_stack[index] == NULL) {
+      break;
+    }
+  }
+  return index;
+}
+
+// Hash code. Any better algorithm?
+int NativeCallStack::hash() const {
+  long hash_val = _hash_value;
+  if (hash_val == 0) {
+    long pc;
+    int  index;
+    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
+      pc = (long)_stack[index];
+      if (pc == 0) break;
+      hash_val += pc;
+    }
+
+    NativeCallStack* p = const_cast<NativeCallStack*>(this);
+    p->_hash_value = (int)(hash_val & 0xFFFFFFFF);
+  }
+  return _hash_value;
+}
+
+void NativeCallStack::print_on(outputStream* out) const {
+  print_on(out, 0);
+}
+
+// Decode and print this call path
+void NativeCallStack::print_on(outputStream* out, int indent) const {
+  address pc;
+  char    buf[1024];
+  int     offset;
+  if (is_empty()) {
+    for (int index = 0; index < indent; index ++) out->print(" ");
+#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+    out->print("[BOOTSTRAP]");
+#else
+    out->print("[No stack]");
+#endif
+  } else {
+    for (int frame = 0; frame < NMT_TrackingStackDepth; frame ++) {
+      pc = get_frame(frame);
+      if (pc == NULL) break;
+      // Print indent
+      for (int index = 0; index < indent; index ++) out->print(" ");
+      if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
+        out->print_cr("[" PTR_FORMAT "] %s+0x%x", p2i(pc), buf, offset);
+      } else {
+        out->print_cr("[" PTR_FORMAT "]", p2i(pc));
+      }
+    }
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/nativeCallStack.hpp	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_NATIVE_CALL_STACK_HPP
+#define SHARE_VM_UTILITIES_NATIVE_CALL_STACK_HPP
+
+#include "memory/allocation.hpp"
+#include "services/nmtCommon.hpp"
+#include "utilities/ostream.hpp"
+
+/*
+ * This class represents a native call path (it does not include Java frames).
+ *
+ * The class was developed in the context of native memory tracking, but it can
+ * also be a useful tool for general debugging.
+ *
+ * For example, the following code prints out the current native call path:
+ *
+ *   ....
+ *   NativeCallStack here;
+ *   here.print_on(tty);
+ *   ....
+ *
+ * However, there are a couple of restrictions on this class. If they are not
+ * strictly followed, native memory tracking may break badly.
+ *
+ * 1. The number of stack frames to capture is defined by native memory tracking.
+ *    This number affects how much memory native memory tracking uses.
+ * 2. The class is strictly a stack object; it must never be allocated on the
+ *    heap or in virtual memory.
+ */
+class NativeCallStack : public StackObj {
+ private:
+  address   _stack[NMT_TrackingStackDepth];
+  int       _hash_value;
+
+ public:
+  NativeCallStack(int toSkip = 0, bool fillStack = false);
+  NativeCallStack(address* pc, int frameCount);
+
+
+  // whether this is an empty stack
+  inline bool is_empty() const {
+    return _stack[0] == NULL;
+  }
+
+  // number of stack frames captured
+  int frames() const;
+
+  inline int compare(const NativeCallStack& other) const {
+    return memcmp(_stack, other._stack, sizeof(_stack));
+  }
+
+  inline bool equals(const NativeCallStack& other) const {
+    // compare hash values
+    if (hash() != other.hash()) return false;
+    // compare each frame
+    return compare(other) == 0;
+  }
+
+  inline address get_frame(int index) const {
+    assert(index >= 0 && index < NMT_TrackingStackDepth, "Index out of bound");
+    return _stack[index];
+  }
+
+  // Hash code. Any better algorithm?
+  int hash() const;
+
+  void print_on(outputStream* out) const;
+  void print_on(outputStream* out, int indent) const;
+};
+
+#endif
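
A usage sketch for the class above, assuming a VM context where NMT and an
outputStream are available (the helper name is hypothetical):

    // Hypothetical helper: capture the current native call path and print it.
    // toSkip = 0 keeps the caller's frame; fillStack = true walks the stack.
    void native_call_stack_sketch(outputStream* st) {
      NativeCallStack here(0, true);  // walk the stack, keep caller frames
      NativeCallStack empty;          // default: no walk, all frames NULL
      assert(empty.is_empty(), "default-constructed stack has no frames");
      st->print_cr("captured %d frames, hash %d", here.frames(), here.hash());
      here.print_on(st, 2);           // print with a two-space indent
    }
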
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Thu Aug 28 14:53:43 2014 -0700
@@ -774,6 +774,11 @@
        st->cr();
      }
 
+  STEP(228, "(Native Memory Tracking)" )
+     if (_verbose) {
+       MemTracker::final_report(st);
+     }
+
   STEP(230, "" )
 
      if (_verbose) {
@@ -897,9 +902,6 @@
   static bool log_done = false;         // done saving error log
   static bool transmit_report_done = false; // done error reporting
 
-  // disble NMT to avoid further exception
-  MemTracker::shutdown(MemTracker::NMT_error_reporting);
-
   if (SuppressFatalErrorMessage) {
       os::abort();
   }
--- a/hotspot/test/TEST.ROOT	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/TEST.ROOT	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,6 @@
 # It also contains test-suite configuration information.
 
 # The list of keywords supported in this test suite
-keys=cte_test jcmd nmt regression gc
+keys=cte_test jcmd nmt regression gc stress
 
 groups=TEST.groups [closed/TEST.groups]
--- a/hotspot/test/TEST.groups	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/TEST.groups	Thu Aug 28 14:53:43 2014 -0700
@@ -70,21 +70,29 @@
   runtime/7194254/Test7194254.java \
   runtime/Metaspace/FragmentMetaspace.java \
   runtime/NMT/BaselineWithParameter.java \
+  runtime/NMT/JcmdBaselineDetail.java \
+  runtime/NMT/JcmdDetailDiff.java \
+  runtime/NMT/JcmdScaleDetail.java \
   runtime/NMT/JcmdScale.java \
+  runtime/NMT/JcmdSummaryDiff.java \
   runtime/NMT/JcmdWithNMTDisabled.java \
+  runtime/NMT/MallocRoundingReportTest.java \
+  runtime/NMT/MallocSiteHashOverflow.java \
+  runtime/NMT/MallocStressTest.java \
   runtime/NMT/MallocTestType.java \
   runtime/NMT/ReleaseCommittedMemory.java \
+  runtime/NMT/ReleaseNoCommit.java \
   runtime/NMT/ShutdownTwice.java \
   runtime/NMT/SummaryAfterShutdown.java \
   runtime/NMT/SummarySanityCheck.java \
   runtime/NMT/ThreadedMallocTestType.java \
   runtime/NMT/ThreadedVirtualAllocTestType.java \
+  runtime/NMT/VirtualAllocCommitUncommitRecommit.java \
   runtime/NMT/VirtualAllocTestType.java \
   runtime/RedefineObject/TestRedefineObject.java \
   runtime/Thread/TestThreadDumpMonitorContention.java \
   runtime/XCheckJniJsig/XCheckJSig.java \
   serviceability/attach/AttachWithStalePidFile.java \
-  serviceability/jvmti/8036666/GetObjectLockCount.java \
   serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java \
   serviceability/dcmd/DynLibDcmdTest.java
 
@@ -120,16 +128,12 @@
   gc/6581734/Test6581734.java \
   gc/7072527/TestFullGCCount.java \
   gc/g1/TestHumongousAllocInitialMark.java \
-  gc/g1/TestHumongousShrinkHeap.java \
   gc/arguments/TestG1HeapRegionSize.java \
   gc/metaspace/TestMetaspaceMemoryPool.java \
   gc/arguments/TestDynMinHeapFreeRatio.java \
   gc/arguments/TestDynMaxHeapFreeRatio.java \
-  gc/parallelScavenge/TestDynShrinkHeap.java \
   runtime/InternalApi/ThreadCpuTimesDeadlock.java \
   serviceability/threads/TestFalseDeadLock.java \
-  serviceability/jvmti/GetObjectSizeOverflow.java \
-  serviceability/jvmti/TestRedefineWithUnresolvedClass.java
 
 # Compact 2 adds full VM tests
 compact2 = \
@@ -217,6 +221,7 @@
   gc/arguments/TestMaxHeapSizeTools.java \
   gc/arguments/TestMaxNewSize.java \
   gc/arguments/TestUseCompressedOopsErgo.java \
+  gc/class_unloading/TestG1ClassUnloadingHWM.java \
   gc/g1/ \
   gc/metaspace/G1AddMetaspaceDependency.java \
   gc/metaspace/TestMetaspacePerfCounters.java \
@@ -257,7 +262,7 @@
   gc/arguments/TestCMSHeapSizeFlags.java \
   gc/arguments/TestMaxNewSize.java \
   gc/arguments/TestUseCompressedOopsErgo.java \
-  gc/class_unloading/TestCMSClassUnloadingDisabledHWM.java \
+  gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java \
   gc/concurrentMarkSweep/ \
   gc/startup_warnings/TestCMS.java \
   gc/startup_warnings/TestCMSIncrementalMode.java \
--- a/hotspot/test/compiler/5091921/Test7005594.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/5091921/Test7005594.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@
 
 ${COMPILEJAVA}/bin/javac ${TESTJAVACOPTS} -d . Test7005594.java
 
-${TESTJAVA}/bin/java ${TESTVMOPTS} -Xmx1600m -Xms1600m -XX:+IgnoreUnrecognizedVMOptions -XX:-ZapUnusedHeapArea -Xcomp -XX:CompileOnly=Test7005594.test Test7005594 > test.out 2>&1
+${TESTJAVA}/bin/java ${TESTOPTS} -Xmx1600m -Xms1600m -XX:+IgnoreUnrecognizedVMOptions -XX:-ZapUnusedHeapArea -Xcomp -XX:CompileOnly=Test7005594.test Test7005594 > test.out 2>&1
 
 result=$?
 
--- a/hotspot/test/compiler/6857159/Test6857159.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/6857159/Test6857159.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@
 
 ${COMPILEJAVA}/bin/javac ${TESTJAVACOPTS} -d . Test6857159.java
 
-${TESTJAVA}/bin/java  ${TESTVMOPTS} -Xbatch -XX:+PrintCompilation -XX:CompileOnly=Test6857159\$ct.run Test6857159 > test.out 2>&1
+${TESTJAVA}/bin/java  ${TESTOPTS} -Xbatch -XX:+PrintCompilation -XX:CompileOnly=Test6857159\$ct.run Test6857159 > test.out 2>&1
 
 grep "COMPILE SKIPPED" test.out
 
--- a/hotspot/test/compiler/6894807/IsInstanceTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/6894807/IsInstanceTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,3 +1,26 @@
+/*
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
 /*
  * @test
  * @bug 6894807
--- a/hotspot/test/compiler/6894807/Test6894807.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/6894807/Test6894807.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -1,55 +1,38 @@
 #!/bin/sh
+#
+#  Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+#  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+# 
+#  This code is free software; you can redistribute it and/or modify it
+#  under the terms of the GNU General Public License version 2 only, as
+#  published by the Free Software Foundation.
+# 
+#  This code is distributed in the hope that it will be useful, but WITHOUT
+#  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+#  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+#  version 2 for more details (a copy is included in the LICENSE file that
+#  accompanied this code).
+# 
+#  You should have received a copy of the GNU General Public License version
+#  2 along with this work; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+#  Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+#  or visit www.oracle.com if you need additional information or have any
+#  questions.
+# 
 
 if [ "${TESTSRC}" = "" ]
-then TESTSRC=.
+then
+  TESTSRC=${PWD}
+  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
 fi
-
-if [ "${TESTJAVA}" = "" ]
-then
-  PARENT=`dirname \`which java\``
-  TESTJAVA=`dirname ${PARENT}`
-  echo "TESTJAVA not set, selecting " ${TESTJAVA}
-  echo "If this is incorrect, try setting the variable manually."
-fi
-
-if [ "${TESTCLASSES}" = "" ]
-then
-  echo "TESTCLASSES not set.  Test cannot execute.  Failed."
-  exit 1
-fi
+echo "TESTSRC=${TESTSRC}"
 
-# set platform-dependent variables
-OS=`uname -s`
-case "$OS" in
-  SunOS | Linux | Darwin )
-    NULL=/dev/null
-    PS=":"
-    FS="/"
-    ;;
-  Windows_* )
-    NULL=NUL
-    PS=";"
-    FS="\\"
-    ;;
-  CYGWIN_* )
-    NULL=/dev/null
-    PS=";"
-    FS="/"
-    ;;
-  * )
-    echo "Unrecognized system!"
-    exit 1;
-    ;;
-esac
+## Adding common setup Variables for running shell tests.
+. ${TESTSRC}/../../test_env.sh
 
-JEMMYPATH=${CPAPPEND}
-CLASSPATH=.${PS}${TESTCLASSES}${PS}${JEMMYPATH} ; export CLASSPATH
-
-THIS_DIR=`pwd`
-
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -version
-
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} IsInstanceTest > test.out 2>&1
+${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} IsInstanceTest > test.out 2>&1
 
 cat test.out
 
--- a/hotspot/test/compiler/6932496/Test6932496.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/6932496/Test6932496.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,26 +26,174 @@
  * @test
  * @bug 6932496
  * @summary incorrect deopt of jsr subroutine on 64 bit c1
- *
- * @compile -source 1.5 -target 1.5 -XDjsrlimit=0 Test6932496.java
- * @run main/othervm -Xcomp -XX:CompileOnly=Test6932496.m Test6932496
+ * @run main/othervm -Xcomp -XX:CompileOnly=Test.test Test6932496
  */
+import java.lang.reflect.Method;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.io.IOException;
 
-public class Test6932496 {
-    static class A {
-        volatile boolean flag = false;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.FieldVisitor;
+import jdk.internal.org.objectweb.asm.Opcodes;
+import jdk.internal.org.objectweb.asm.Type;
+import jdk.internal.org.objectweb.asm.Label;
+
+public class Test6932496 extends ClassLoader {
+    private static final int CLASS_FILE_VERSION = 49;
+    private static final String CLASS_TEST = "Test";
+    private static final String CLASS_OBJECT = "java/lang/Object";
+    private static final String METHOD_INIT = "<init>";
+    private static final String METHOD_TEST = "test";
+    private static final String DESC_VOID_METHOD = "()V";
+    private static final String FIELD_FLAG = "flag";
+
+    public static void main(String[] args) {
+        Test6932496 test = new Test6932496();
+        test.execute();
     }
 
-    static void m() {
+    private void execute() {
+        byte[] bytecode = Test6932496.generateTestClass();
+
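+        // Dump the generated class to a file so the bytecode can be inspected
+        // offline; a failure to write the dump is reported but non-fatal.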
         try {
-        } finally {
-            A a = new A();
-            a.flag = true;
+            Files.write(Paths.get("Test.class.dump"), bytecode);
+        } catch (IOException e) {
+            System.err.println("classfile dump failed : " + e.getMessage());
+            e.printStackTrace();
+        }
+        try {
+            Class<?> aClass = defineClass(CLASS_TEST, bytecode, 0, bytecode.length);
+            Method test = aClass.getDeclaredMethod(METHOD_TEST);
+            test.invoke(null);
+        } catch (ClassFormatError | IllegalArgumentException
+                    | ReflectiveOperationException e) {
+            throw new RuntimeException("TESTBUG : generated class is invalid", e);
         }
     }
 
+    /*
+        public class Test {
+            volatile boolean flag = false;
+            public static void test() {
+                try {
+                } finally {
+                    Test test = new Test();
+                    test.flag = true;
+                }
+            }
+        }
+    */
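+    // The old "@compile -XDjsrlimit=0" directive is gone because current javac
+    // no longer emits jsr/ret for finally blocks, and class files of version 51
+    // or higher reject those opcodes outright; the jsr-based bytecode this bug
+    // needs is therefore assembled directly with ASM at the still-legal
+    // version 49 (CLASS_FILE_VERSION).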
+    private static byte[] generateTestClass() {
+        ClassWriter cw = new ClassWriter(0);
+        cw.visit(CLASS_FILE_VERSION, Opcodes.ACC_PUBLIC + Opcodes.ACC_SUPER,
+                CLASS_TEST, null, CLASS_OBJECT, null);
+        // volatile boolean flag;
+        {
+            FieldVisitor fv = cw.visitField(Opcodes.ACC_VOLATILE, FIELD_FLAG,
+                    Type.BOOLEAN_TYPE.getDescriptor(),
+                    /* signature = */ null, /* value = */ null);
+        }
 
-    static public void main(String[] args) {
-        m();
+        /*
+            public Test() {
+                flag = false;
+            }
+        */
+        {
+            MethodVisitor mv = cw.visitMethod(Opcodes.ACC_PUBLIC,
+                    METHOD_INIT, DESC_VOID_METHOD,
+                    /* signature = */ null, /* exceptions = */ null);
+
+            mv.visitCode();
+            mv.visitVarInsn(Opcodes.ALOAD, 0);
+            mv.visitMethodInsn(Opcodes.INVOKESPECIAL, CLASS_OBJECT, METHOD_INIT,
+                    DESC_VOID_METHOD, false);
+
+            mv.visitVarInsn(Opcodes.ALOAD, 0);
+            mv.visitInsn(Opcodes.ICONST_0);
+            mv.visitFieldInsn(Opcodes.PUTFIELD, CLASS_TEST, FIELD_FLAG,
+                    Type.BOOLEAN_TYPE.getDescriptor());
+
+            mv.visitInsn(Opcodes.RETURN);
+            mv.visitMaxs(/* stack = */ 2, /* locals = */ 1);
+            mv.visitEnd();
+        }
+
+        /*
+            public static void test() {
+                try {
+                } finally {
+                    Test test = new Test();
+                    test.flag = true;
+                }
+            }
+        */
+        {
+            MethodVisitor mv = cw.visitMethod(
+                    Opcodes.ACC_STATIC + Opcodes.ACC_PUBLIC,
+                    METHOD_TEST, DESC_VOID_METHOD,
+                    /* signature = */ null, /* exceptions = */ null);
+            Label beginLabel = new Label();
+            Label block1EndLabel = new Label();
+            Label handlerLabel = new Label();
+            Label block2EndLabel = new Label();
+            Label label = new Label();
+            Label endLabel = new Label();
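+            // Label layout of the generated method:
+            //   beginLabel..block1EndLabel   - (empty) try block: jsr to subroutine, goto end
+            //   handlerLabel..block2EndLabel - catch-any handler: jsr to subroutine, rethrow
+            //   label                        - finally subroutine: new Test().flag = true
+            //   endLabel                     - normal return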
+
+            mv.visitCode();
+            mv.visitTryCatchBlock(beginLabel, block1EndLabel, handlerLabel,
+                    /* type = <any> */ null);
+            mv.visitTryCatchBlock(handlerLabel, block2EndLabel, handlerLabel,
+                    /* type = <any> */ null);
+
+            mv.visitLabel(beginLabel);
+            mv.visitJumpInsn(Opcodes.JSR, label);
+            mv.visitLabel(block1EndLabel);
+            mv.visitJumpInsn(Opcodes.GOTO, endLabel);
+
+            mv.visitLabel(handlerLabel);
+            mv.visitVarInsn(Opcodes.ASTORE, 0);
+            mv.visitJumpInsn(Opcodes.JSR, label);
+            mv.visitLabel(block2EndLabel);
+            mv.visitVarInsn(Opcodes.ALOAD, 0);
+            mv.visitInsn(Opcodes.ATHROW);
+
+            mv.visitLabel(label);
+            mv.visitVarInsn(Opcodes.ASTORE, 1);
+            mv.visitTypeInsn(Opcodes.NEW, CLASS_TEST);
+            mv.visitInsn(Opcodes.DUP);
+            mv.visitMethodInsn(Opcodes.INVOKESPECIAL, CLASS_TEST, METHOD_INIT,
+                    DESC_VOID_METHOD, false);
+            mv.visitVarInsn(Opcodes.ASTORE, 2);
+
+            mv.visitVarInsn(Opcodes.ALOAD, 2);
+            mv.visitInsn(Opcodes.ICONST_1);
+            mv.visitFieldInsn(Opcodes.PUTFIELD, CLASS_TEST, FIELD_FLAG,
+                    Type.BOOLEAN_TYPE.getDescriptor());
+
+            mv.visitVarInsn(Opcodes.RET, 1);
+
+            mv.visitLabel(endLabel);
+            mv.visitInsn(Opcodes.RETURN);
+            mv.visitMaxs(/* stack = */ 2, /* locals = */ 3);
+            mv.visitEnd();
+        }
+
+        cw.visitEnd();
+        return cw.toByteArray();
     }
 }
--- a/hotspot/test/compiler/7068051/Test7068051.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/7068051/Test7068051.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -41,5 +41,5 @@
 
 ${COMPILEJAVA}/bin/javac ${TESTJAVACOPTS} -d . Test7068051.java
 
-${TESTJAVA}/bin/java ${TESTVMOPTS} -showversion -Xbatch Test7068051 foo.jar
+${TESTJAVA}/bin/java ${TESTOPTS} -showversion -Xbatch Test7068051 foo.jar
 
--- a/hotspot/test/compiler/7070134/Test7070134.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/7070134/Test7070134.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@
 
 ${COMPILEJAVA}/bin/javac ${TESTJAVACOPTS} -d . Stemmer.java
 
-${TESTJAVA}/bin/java ${TESTVMOPTS} -Xbatch Stemmer words > test.out 2>&1
+${TESTJAVA}/bin/java ${TESTOPTS} -Xbatch Stemmer words > test.out 2>&1
 
 exit $?
 
--- a/hotspot/test/compiler/7200264/Test7200264.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/7200264/Test7200264.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
 ## Adding common setup Variables for running shell tests.
 . ${TESTSRC}/../../test_env.sh
 
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -Xinternalversion | sed 's/amd64/x86/' | grep "x86" | grep "Server VM" | grep "debug"
+${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -Xinternalversion | sed 's/amd64/x86/' | grep "x86" | grep "Server VM" | grep "debug"
 
 # Only test fastdebug Server VM on x86
 if [ $? != 0 ]
@@ -43,7 +43,7 @@
 fi
 
 # grep for support integer multiply vectors (cpu with SSE4.1)
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -XX:+PrintMiscellaneous -XX:+Verbose -version | grep "cores per cpu" | grep "sse4.1"
+${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -XX:+PrintMiscellaneous -XX:+Verbose -version | grep "cores per cpu" | grep "sse4.1"
 
 if [ $? != 0 ]
 then
@@ -55,7 +55,7 @@
 cp ${TESTSRC}${FS}TestIntVect.java .
 ${COMPILEJAVA}${FS}bin${FS}javac ${TESTJAVACOPTS} -d . TestIntVect.java
 
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -Xbatch -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+PrintCompilation -XX:+TraceNewVectors TestIntVect > test.out 2>&1
+${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -Xbatch -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+PrintCompilation -XX:+TraceNewVectors TestIntVect > test.out 2>&1
 
 COUNT=`grep AddVI test.out | wc -l | awk '{print $1}'`
 if [ $COUNT -lt 4 ]
--- a/hotspot/test/compiler/8009761/Test8009761.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/8009761/Test8009761.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
  * @summary Deoptimization on sparc doesn't set Llast_SP correctly in the interpreter frames it creates
  * @build Test8009761
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=exclude,Test8009761::m2 -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -Xss256K Test8009761
  */
 public class Test8009761 {
--- a/hotspot/test/compiler/8010927/Test8010927.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/8010927/Test8010927.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
  * @library /testlibrary/whitebox /testlibrary
  * @build Test8010927
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. -Xmx64m -XX:NewSize=20971520 -XX:MaxNewSize=32m -XX:-UseTLAB -XX:-UseParNewGC -XX:-UseAdaptiveSizePolicy Test8010927
  */
 
--- a/hotspot/test/compiler/arguments/TestUseBMI1InstructionsOnSupportedCPU.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/arguments/TestUseBMI1InstructionsOnSupportedCPU.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @build TestUseBMI1InstructionsOnSupportedCPU
  *        BMISupportedCPUTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseBMI1InstructionsOnSupportedCPU
  */
--- a/hotspot/test/compiler/arguments/TestUseBMI1InstructionsOnUnsupportedCPU.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/arguments/TestUseBMI1InstructionsOnUnsupportedCPU.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @build TestUseBMI1InstructionsOnUnsupportedCPU
  *        BMIUnsupportedCPUTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseBMI1InstructionsOnUnsupportedCPU
  */
--- a/hotspot/test/compiler/arguments/TestUseCountLeadingZerosInstructionOnSupportedCPU.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/arguments/TestUseCountLeadingZerosInstructionOnSupportedCPU.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @build TestUseCountLeadingZerosInstructionOnSupportedCPU
  *        BMISupportedCPUTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
  *                   TestUseCountLeadingZerosInstructionOnSupportedCPU
--- a/hotspot/test/compiler/arguments/TestUseCountLeadingZerosInstructionOnUnsupportedCPU.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/arguments/TestUseCountLeadingZerosInstructionOnUnsupportedCPU.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @build TestUseCountLeadingZerosInstructionOnUnsupportedCPU
  *        BMIUnsupportedCPUTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
  *                   TestUseCountLeadingZerosInstructionOnUnsupportedCPU
--- a/hotspot/test/compiler/arguments/TestUseCountTrailingZerosInstructionOnSupportedCPU.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/arguments/TestUseCountTrailingZerosInstructionOnSupportedCPU.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @build TestUseCountTrailingZerosInstructionOnSupportedCPU
  *        BMISupportedCPUTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
  *                   TestUseCountTrailingZerosInstructionOnSupportedCPU
--- a/hotspot/test/compiler/arguments/TestUseCountTrailingZerosInstructionOnUnsupportedCPU.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/arguments/TestUseCountTrailingZerosInstructionOnUnsupportedCPU.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @build TestUseCountTrailingZerosInstructionOnUnsupportedCPU
  *        BMIUnsupportedCPUTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
  *                   TestUseCountTrailingZerosInstructionOnUnsupportedCPU
--- a/hotspot/test/compiler/ciReplay/TestSA.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/ciReplay/TestSA.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -68,7 +68,7 @@
 fi
 
 echo "dumpreplaydata -a > ${replay_data}" | \
-        ${JAVA} ${TESTVMOPTS} \
+        ${JAVA} ${TESTOPTS} \
         -cp ${TESTJAVA}${FS}lib${FS}sa-jdi.jar \
         sun.jvm.hotspot.CLHSDB  ${JAVA} ${core_file}
 
--- a/hotspot/test/compiler/ciReplay/common.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/ciReplay/common.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -22,6 +22,7 @@
 # questions.
 # 
 # 
+set -x
 
 # $1 - error code
 # $2 - test name
@@ -56,7 +57,7 @@
     shift
     name=$1
     shift
-    VMOPTS="${TESTVMOPTS} $@"
+    VMOPTS="${TESTOPTS} $@"
     echo "POSITIVE TEST [$name]"
     start_test ${VMOPTS}
     exit_code=$?
@@ -75,7 +76,7 @@
     shift
     name=$1
     shift
-    VMOPTS="${TESTVMOPTS} $@"
+    VMOPTS="${TESTOPTS} $@"
     echo "NEGATIVE TEST [$name]"
     start_test ${VMOPTS}
     exit_code=$?
@@ -149,7 +150,7 @@
 
 replay_data=test_replay.txt
 
-${JAVA} ${TESTVMOPTS} -Xinternalversion 2>&1 | grep debug
+${JAVA} ${TESTOPTS} -Xinternalversion 2>&1 | grep debug
 
 # Only test fastdebug 
 if [ $? -ne 0 ]
@@ -158,7 +159,7 @@
     exit 0
 fi
 
-is_int=`${JAVA} ${TESTVMOPTS} -version 2>&1 | grep -c "interpreted mode"`
+is_int=`${JAVA} ${TESTOPTS} -version 2>&1 | grep -c "interpreted mode"`
 # Not applicable for Xint
 if [ $is_int -ne 0 ]
 then
@@ -168,14 +169,14 @@
 
 cleanup
 
-client_available=`${JAVA} ${TESTVMOPTS} -client -Xinternalversion 2>&1 | \
+client_available=`${JAVA} ${TESTOPTS} -client -Xinternalversion 2>&1 | \
         grep -c Client`
-server_available=`${JAVA} ${TESTVMOPTS} -server -Xinternalversion 2>&1 | \
+server_available=`${JAVA} ${TESTOPTS} -server -Xinternalversion 2>&1 | \
         grep -c Server`
-tiered_available=`${JAVA} ${TESTVMOPTS} -XX:+TieredCompilation -XX:+PrintFlagsFinal -version | \
+tiered_available=`${JAVA} ${TESTOPTS} -XX:+TieredCompilation -XX:+PrintFlagsFinal -version | \
         grep TieredCompilation | \
         grep -c true`
-is_tiered=`${JAVA} ${TESTVMOPTS} -XX:+PrintFlagsFinal -version | \
+is_tiered=`${JAVA} ${TESTOPTS} -XX:+PrintFlagsFinal -version | \
         grep TieredCompilation | \
         grep -c true`
 # CompLevel_simple -- C1
@@ -207,7 +208,7 @@
         fi
     fi
 
-    cmd="${JAVA} ${TESTVMOPTS} $@ \
+    cmd="${JAVA} ${TESTOPTS} $@ \
             -Xms8m \
             -Xmx32m \
             -XX:MetaspaceSize=4m \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/classUnloading/methodUnloading/TestMethodUnloading.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLClassLoader;
+
+/*
+ * @test TestMethodUnloading
+ * @bug 8029443
+ * @summary Tests the unloading of methods due to class unloading
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestMethodUnloading
+ * @build WorkerClass
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation -XX:-UseCompressedOops -XX:+UseParallelGC -XX:CompileOnly=TestMethodUnloading::doWork TestMethodUnloading
+ */
+public class TestMethodUnloading {
+    private static final String workerClassName = "WorkerClass";
+    private static int work = -1;
+
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final int COMP_LEVEL_SIMPLE = 1;
+    private static final int COMP_LEVEL_FULL_OPTIMIZATION = 4;
+
+    /**
+     * Does some work by either using the workerClass or locally producing values.
+     * @param workerClass Class performing some work (will be unloaded)
+     * @param useWorker If true the workerClass is used
+     */
+    private static void doWork(Class<?> workerClass, boolean useWorker) throws InstantiationException, IllegalAccessException {
+        if (useWorker) {
+            // Create a new instance
+            Object worker = workerClass.newInstance();
+            // We would like to call a method of WorkerClass here but we cannot cast to WorkerClass
+            // because the class was loaded by a different class loader. One solution would be to use
+            // reflection but since we want C2 to implement the call as an optimized IC we call
+            // Object::hashCode() here which actually calls WorkerClass::hashCode().
+            // C2 will then implement this call as an optimized IC that points to a to-interpreter stub
+            // referencing the Method* for WorkerClass::hashCode().
+            work = worker.hashCode();
+            if (work != 42) {
+                throw new RuntimeException("Work not done");
+            }
+        } else {
+            // Do some important work here
+            work = 1;
+        }
+    }
+
+    /**
+     * Makes sure that method is compiled by forcing compilation if not yet compiled.
+     * @param m Method to be checked
+     */
+    private static void makeSureIsCompiled(Method m) {
+        // Make sure background compilation is disabled
+        if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) {
+            throw new RuntimeException("Background compilation enabled");
+        }
+
+        // Check if already compiled
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            // If not, try to compile it with C2
+            if(!WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION)) {
+                // C2 compiler not available, try to compile with C1
+                WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_SIMPLE);
+            }
+            // Because background compilation is disabled, method should now be compiled
+            if(!WHITE_BOX.isMethodCompiled(m)) {
+                throw new RuntimeException(m + " not compiled");
+            }
+        }
+    }
+
+    /**
+     * This test creates stale Method* metadata in a to-interpreter stub of an optimized IC.
+     *
+     * The following steps are performed:
+     * (1) A workerClass is loaded by a custom class loader
+     * (2) The method doWork that calls a method of the workerClass is compiled. The call
+     *     is implemented as an optimized IC calling a to-interpreter stub. The to-interpreter
+     *     stub contains a Method* to a workerClass method.
+     * (3) Unloading of the workerClass is forced. The to-interpreter stub now contains a dead Method*.
+     * (4) Depending on the implementation of the IC, the compiled version of doWork should still be
+     *     valid. We call it again without using the workerClass.
+     */
+    public static void main(String[] args) throws Exception {
+        // (1) Create a custom class loader with no parent class loader
+        URL url = TestMethodUnloading.class.getProtectionDomain().getCodeSource().getLocation();
+        URLClassLoader loader = new URLClassLoader(new URL[] {url}, null);
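+        // The null parent keeps WorkerClass invisible to the application class
+        // loader, so dropping this loader below makes the class unloadable.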
+
+        // Load worker class with custom class loader
+        Class<?> workerClass = Class.forName(workerClassName, true, loader);
+
+        // (2) Make sure all paths of doWork are profiled and compiled
+        for (int i = 0; i < 100000; ++i) {
+            doWork(workerClass, true);
+            doWork(workerClass, false);
+        }
+
+        // Make sure doWork is compiled now
+        Method doWork = TestMethodUnloading.class.getDeclaredMethod("doWork", Class.class, boolean.class);
+        makeSureIsCompiled(doWork);
+
+        // (3) Throw away class loader and reference to workerClass to allow unloading
+        loader.close();
+        loader = null;
+        workerClass = null;
+
+        // Force garbage collection to trigger unloading of workerClass
+        // Dead reference to WorkerClass::hashCode triggers JDK-8029443
+        WHITE_BOX.fullGC();
+
+        // (4) Depending on the implementation of the IC, the compiled version of doWork
+        // may still be valid here. Execute it without a workerClass.
+        doWork(null, false);
+        if (work != 1) {
+            throw new RuntimeException("Work not done");
+        }
+
+        doWork(Object.class, false);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/classUnloading/methodUnloading/WorkerClass.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * Worker class that is dynamically loaded/unloaded by TestMethodUnloading.
+ */
+public class WorkerClass {
+    /**
+     * We override hashCode here to be able to access this implementation
+     * via an Object reference (we cannot cast to WorkerClass).
+     */
+    @Override
+    public int hashCode() {
+        return 42;
+    }
+}
+
--- a/hotspot/test/compiler/intrinsics/bmi/TestAndnI.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/TestAndnI.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestAndnI BMITestRunner Expr
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestAndnI
  */
--- a/hotspot/test/compiler/intrinsics/bmi/TestAndnL.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/TestAndnL.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestAndnL BMITestRunner Expr
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestAndnL
  */
--- a/hotspot/test/compiler/intrinsics/bmi/TestBlsiI.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsiI.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestBlsiI BMITestRunner Expr
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestBlsiI
  */
--- a/hotspot/test/compiler/intrinsics/bmi/TestBlsiL.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsiL.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestBlsiL BMITestRunner Expr
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestBlsiL
  */
--- a/hotspot/test/compiler/intrinsics/bmi/TestBlsmskI.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsmskI.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestBlsmskI BMITestRunner Expr
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestBlsmskI
  */
--- a/hotspot/test/compiler/intrinsics/bmi/TestBlsmskL.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsmskL.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestBlsmskL BMITestRunner Expr
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestBlsmskL
  */
--- a/hotspot/test/compiler/intrinsics/bmi/TestBlsrI.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsrI.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestBlsrI BMITestRunner Expr
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestBlsrI
  */
--- a/hotspot/test/compiler/intrinsics/bmi/TestBlsrL.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsrL.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestBlsrL BMITestRunner Expr
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestBlsrL
  */
--- a/hotspot/test/compiler/intrinsics/bmi/TestLzcntI.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/TestLzcntI.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestLzcntI BMITestRunner Expr
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestLzcntI
  */
--- a/hotspot/test/compiler/intrinsics/bmi/TestLzcntL.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/TestLzcntL.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestLzcntL BMITestRunner Expr
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestLzcntL
  */
--- a/hotspot/test/compiler/intrinsics/bmi/TestTzcntI.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/TestTzcntI.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestTzcntI BMITestRunner Expr
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestTzcntI
  */
--- a/hotspot/test/compiler/intrinsics/bmi/TestTzcntL.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/TestTzcntL.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestTzcntL BMITestRunner Expr
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestTzcntL
  */
--- a/hotspot/test/compiler/intrinsics/bmi/verifycode/AddnTestI.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/verifycode/AddnTestI.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
  * @build AddnTestI
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions AddnTestI
  */
--- a/hotspot/test/compiler/intrinsics/bmi/verifycode/AddnTestL.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/verifycode/AddnTestL.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
  * @build AddnTestL
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions AddnTestL
  */
--- a/hotspot/test/compiler/intrinsics/bmi/verifycode/BlsiTestI.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/verifycode/BlsiTestI.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
  * @build BlsiTestI
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsiTestI
  */
--- a/hotspot/test/compiler/intrinsics/bmi/verifycode/BlsiTestL.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/verifycode/BlsiTestL.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
  * @build BlsiTestL
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsiTestL
  */
--- a/hotspot/test/compiler/intrinsics/bmi/verifycode/BlsmskTestI.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/verifycode/BlsmskTestI.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
  * @build BlsmskTestI
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsmskTestI
  */
--- a/hotspot/test/compiler/intrinsics/bmi/verifycode/BlsmskTestL.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/verifycode/BlsmskTestL.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
  * @build BlsmskTestL
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsmskTestL
  */
--- a/hotspot/test/compiler/intrinsics/bmi/verifycode/BlsrTestI.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/verifycode/BlsrTestI.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
  * @build BlsrTestI
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsrTestI
  */
--- a/hotspot/test/compiler/intrinsics/bmi/verifycode/BlsrTestL.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/verifycode/BlsrTestL.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
  * @build BlsrTestL
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsrTestL
  */
--- a/hotspot/test/compiler/intrinsics/bmi/verifycode/LZcntTestI.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/verifycode/LZcntTestI.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
  * @build LZcntTestI
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCountLeadingZerosInstruction LZcntTestI
  */
--- a/hotspot/test/compiler/intrinsics/bmi/verifycode/LZcntTestL.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/verifycode/LZcntTestL.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
  * @build LZcntTestL
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCountLeadingZerosInstruction LZcntTestL
  */
--- a/hotspot/test/compiler/intrinsics/bmi/verifycode/TZcntTestI.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/verifycode/TZcntTestI.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
  * @build TZcntTestI
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCountTrailingZerosInstruction TZcntTestI
  */
--- a/hotspot/test/compiler/intrinsics/bmi/verifycode/TZcntTestL.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/bmi/verifycode/TZcntTestL.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
  * @build TZcntTestL
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCountTrailingZerosInstruction TZcntTestL
  */
--- a/hotspot/test/compiler/intrinsics/mathexact/sanity/AddExactIntTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/mathexact/sanity/AddExactIntTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
  * @build AddExactIntTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
--- a/hotspot/test/compiler/intrinsics/mathexact/sanity/AddExactLongTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/mathexact/sanity/AddExactLongTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
  * @build AddExactLongTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
--- a/hotspot/test/compiler/intrinsics/mathexact/sanity/DecrementExactIntTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/mathexact/sanity/DecrementExactIntTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
  * @build DecrementExactIntTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
--- a/hotspot/test/compiler/intrinsics/mathexact/sanity/DecrementExactLongTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/mathexact/sanity/DecrementExactLongTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
  * @build DecrementExactLongTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
--- a/hotspot/test/compiler/intrinsics/mathexact/sanity/IncrementExactIntTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/mathexact/sanity/IncrementExactIntTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
  * @build IncrementExactIntTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
--- a/hotspot/test/compiler/intrinsics/mathexact/sanity/IncrementExactLongTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/mathexact/sanity/IncrementExactLongTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
  * @build IncrementExactLongTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
--- a/hotspot/test/compiler/intrinsics/mathexact/sanity/MultiplyExactIntTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/mathexact/sanity/MultiplyExactIntTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
  * @build MultiplyExactIntTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
--- a/hotspot/test/compiler/intrinsics/mathexact/sanity/MultiplyExactLongTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/mathexact/sanity/MultiplyExactLongTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
  * @build MultiplyExactLongTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
--- a/hotspot/test/compiler/intrinsics/mathexact/sanity/NegateExactIntTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/mathexact/sanity/NegateExactIntTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
  * @build NegateExactIntTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
--- a/hotspot/test/compiler/intrinsics/mathexact/sanity/NegateExactLongTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/mathexact/sanity/NegateExactLongTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
  * @build NegateExactLongTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
--- a/hotspot/test/compiler/intrinsics/mathexact/sanity/SubtractExactIntTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/mathexact/sanity/SubtractExactIntTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
  * @build SubtractExactIntTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
--- a/hotspot/test/compiler/intrinsics/mathexact/sanity/SubtractExactLongTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/intrinsics/mathexact/sanity/SubtractExactLongTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
  * @build SubtractExactLongTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/macronodes/TestEliminateAllocationPhi.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8046698
+ * @summary PhiNode inserted between AllocateNode and Initialization node confuses allocation elimination
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestEliminateAllocationPhi
+ *
+ */
+
+public class TestEliminateAllocationPhi {
+
+    // This returns I when called from m() and, once optimized, will go
+    // away, but it confuses escape analysis in m(): I is found to be
+    // non-escaping but not scalar replaceable. It is kept in its own
+    // method so that the profile of the if() branch can be made to
+    // look like it is sometimes taken.
+    static Integer m2(Integer I, int i) {
+        for (; i < 10; i=(i+2)*(i+2)) {
+        }
+        if (i == 121) {
+            return II;
+        }
+        return I;
+    }
+
+    static Integer II = new Integer(42);
+
+    static int m(int[] integers, boolean flag) {
+        int j = 0;
+        while(true) {
+            try {
+                int k = integers[j++];
+                // A branch that will cause loop unswitching
+                if (flag) {
+                    k += 42;
+                }
+                if (k < 1000) {
+                    throw new Exception();
+                }
+                // Because of the try/catch the Allocate node for this
+                // new will be in the loop while the Initialization
+                // node will be outside the loop. When loop
+                // unswitching happens, the Allocate node will be
+                // cloned and the results of both will be inputs to a
+                // Phi that will be between the Allocate nodes and the
+                // Initialization nodes.
+                Integer I = new Integer(k);
+
+                I = m2(I, 0);
+
+                int i = I.intValue();
+                return i;
+            } catch(Exception e) {
+            }
+        }
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 5000; i++) {
+            m2(null, 1);
+        }
+
+        int[] integers = { 2000 };
+        for (int i = 0; i < 6000; i++) {
+            m(integers, (i%2) == 0);
+        }
+        int[] integers2 = { 1, 2, 3, 4, 5, 2000 };
+        for (int i = 0; i < 10000; i++) {
+            m(integers2, (i%2) == 0);
+        }
+    }
+}
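The branch-profile trick in m2() above rests on simple arithmetic: starting from
i = 0 (the call from m()), the update i = (i+2)*(i+2) visits 0, 4, 36 and exits
with i == 36, so the if (i == 121) branch is not taken; starting from i = 1 (the
warm-up calls from main()), it visits 1, 9, 121 and the branch is taken. A
standalone check of those values (M2Trace is a hypothetical name):

    public class M2Trace {
        public static void main(String[] args) {
            for (int start : new int[] { 0, 1 }) {
                int i = start;
                // Same loop as in TestEliminateAllocationPhi.m2().
                for (; i < 10; i = (i + 2) * (i + 2)) {
                }
                // Prints: start=0 exits with i=36, start=1 exits with i=121.
                System.out.println("start=" + start + " exits with i=" + i);
            }
        }
    }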
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/osr/TestOSRWithNonEmptyStack.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.Label;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import static jdk.internal.org.objectweb.asm.Opcodes.*;
+
+/**
+ * @test
+ * @bug 8051344
+ * @summary Force OSR compilation with non-empty stack at the OSR entry point.
+ * @compile -XDignore.symbol.file TestOSRWithNonEmptyStack.java
+ * @run main/othervm -XX:CompileOnly=TestCase.test TestOSRWithNonEmptyStack
+ */
+public class TestOSRWithNonEmptyStack extends ClassLoader {
+    private static final int CLASS_FILE_VERSION = 52;
+    private static final String CLASS_NAME = "TestCase";
+    private static final String METHOD_NAME = "test";
+    private static final int ITERATIONS = 1_000_000;
+
+    private static byte[] generateTestClass() {
+        ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES);
+
+        cw.visit(TestOSRWithNonEmptyStack.CLASS_FILE_VERSION, ACC_PUBLIC,
+                TestOSRWithNonEmptyStack.CLASS_NAME, null, "java/lang/Object",
+                null);
+
+        TestOSRWithNonEmptyStack.generateConstructor(cw);
+        TestOSRWithNonEmptyStack.generateTestMethod(cw);
+
+        cw.visitEnd();
+        return cw.toByteArray();
+    }
+
+    private static void generateConstructor(ClassWriter classWriter) {
+        MethodVisitor mv = classWriter.visitMethod(ACC_PUBLIC, "<init>", "()V",
+                null, null);
+
+        mv.visitCode();
+
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V",
+                false);
+        mv.visitInsn(RETURN);
+
+        mv.visitMaxs(0, 0);
+        mv.visitEnd();
+    }
+
+    private static void generateTestMethod(ClassWriter classWriter) {
+        MethodVisitor mv = classWriter.visitMethod(ACC_PUBLIC,
+                TestOSRWithNonEmptyStack.METHOD_NAME, "()V", null, null);
+        Label osrEntryPoint = new Label();
+
+        mv.visitCode();
+        // Push 'this' into stack before OSR entry point to bail out compilation
+        mv.visitVarInsn(ALOAD, 0);
+        // Setup loop counter
+        mv.visitInsn(ICONST_0);
+        mv.visitVarInsn(ISTORE, 1);
+        // Begin loop
+        mv.visitLabel(osrEntryPoint);
+        // Increment loop counter
+        mv.visitVarInsn(ILOAD, 1);
+        mv.visitInsn(ICONST_1);
+        mv.visitInsn(IADD);
+        // Duplicate it for loop condition check
+        mv.visitInsn(DUP);
+        mv.visitVarInsn(ISTORE, 1);
+        // Check loop condition
+        mv.visitLdcInsn(TestOSRWithNonEmptyStack.ITERATIONS);
+        mv.visitJumpInsn(IF_ICMPLT, osrEntryPoint);
+        // Pop 'this'.
+        mv.visitInsn(POP);
+        mv.visitInsn(RETURN);
+
+        mv.visitMaxs(0, 0);
+        mv.visitEnd();
+    }
+
+    private void run() {
+        byte[] bytecode = TestOSRWithNonEmptyStack.generateTestClass();
+
+        try {
+            Class<?> klass = defineClass(TestOSRWithNonEmptyStack.CLASS_NAME,
+                    bytecode, 0, bytecode.length);
+
+            Constructor<?> ctor = klass.getConstructor();
+            Method method = klass.getDeclaredMethod(
+                    TestOSRWithNonEmptyStack.METHOD_NAME);
+
+            Object testCase = ctor.newInstance();
+            method.invoke(testCase);
+        } catch (Exception e) {
+            throw new RuntimeException(
+                    "Test bug: generated class should be valid.", e);
+        }
+    }
+
+    public static void main(String args[]) {
+        new TestOSRWithNonEmptyStack().run();
+    }
+}
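Ignoring the value parked on the operand stack, the generated TestCase.test()
method is roughly the do-while loop below; what Java source cannot express, and
what the ASM code above adds, is the ALOAD 0 that leaves 'this' on the stack
across the backward branch, so the OSR entry point at the loop header sees a
non-empty expression stack. A sketch of the loop shape (OsrLoopShape is a
hypothetical name):

    public class OsrLoopShape {
        void test() {
            int i = 0;
            do {
                i = i + 1;           // ILOAD 1; ICONST_1; IADD; DUP; ISTORE 1
            } while (i < 1_000_000); // LDC ITERATIONS; IF_ICMPLT osrEntryPoint
        }

        public static void main(String[] args) {
            new OsrLoopShape().test();
        }
    }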
--- a/hotspot/test/compiler/rtm/cli/TestPrintPreciseRTMLockingStatisticsOptionOnSupportedConfig.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestPrintPreciseRTMLockingStatisticsOptionOnSupportedConfig.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestPrintPreciseRTMLockingStatisticsOptionOnSupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
  *                   TestPrintPreciseRTMLockingStatisticsOptionOnSupportedConfig
--- a/hotspot/test/compiler/rtm/cli/TestPrintPreciseRTMLockingStatisticsOptionOnUnsupportedConfig.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestPrintPreciseRTMLockingStatisticsOptionOnUnsupportedConfig.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestPrintPreciseRTMLockingStatisticsOptionOnUnsupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
  *                 TestPrintPreciseRTMLockingStatisticsOptionOnUnsupportedConfig
--- a/hotspot/test/compiler/rtm/cli/TestRTMAbortRatioOptionOnSupportedConfig.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestRTMAbortRatioOptionOnSupportedConfig.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMAbortRatioOptionOnSupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestRTMAbortRatioOptionOnSupportedConfig
  */
--- a/hotspot/test/compiler/rtm/cli/TestRTMAbortRatioOptionOnUnsupportedConfig.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestRTMAbortRatioOptionOnUnsupportedConfig.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMAbortRatioOptionOnUnsupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestRTMAbortRatioOptionOnUnsupportedConfig
  */
--- a/hotspot/test/compiler/rtm/cli/TestRTMTotalCountIncrRateOptionOnSupportedConfig.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestRTMTotalCountIncrRateOptionOnSupportedConfig.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMTotalCountIncrRateOptionOnSupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
  *                   TestRTMTotalCountIncrRateOptionOnSupportedConfig
--- a/hotspot/test/compiler/rtm/cli/TestRTMTotalCountIncrRateOptionOnUnsupportedConfig.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestRTMTotalCountIncrRateOptionOnUnsupportedConfig.java	Thu Aug 28 14:53:43 2014 -0700
@@ -35,6 +35,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMTotalCountIncrRateOptionOnUnsupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
  *                   TestRTMTotalCountIncrRateOptionOnUnsupportedConfig
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnSupportedConfig.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnSupportedConfig.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMDeoptOptionOnSupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseRTMDeoptOptionOnSupportedConfig
  */
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnUnsupportedConfig.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnUnsupportedConfig.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMDeoptOptionOnUnsupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseRTMDeoptOptionOnUnsupportedConfig
  */
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMForStackLocksOptionOnSupportedConfig.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMForStackLocksOptionOnSupportedConfig.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMForStackLocksOptionOnSupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
  *                   TestUseRTMForStackLocksOptionOnSupportedConfig
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMForStackLocksOptionOnUnsupportedConfig.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMForStackLocksOptionOnUnsupportedConfig.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMForStackLocksOptionOnUnsupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI
  *                    TestUseRTMForStackLocksOptionOnUnsupportedConfig
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnSupportedConfig.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnSupportedConfig.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMLockingOptionOnSupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseRTMLockingOptionOnSupportedConfig
  */
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnUnsupportedCPU.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnUnsupportedCPU.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMLockingOptionOnUnsupportedCPU
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseRTMLockingOptionOnUnsupportedCPU
  */
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnUnsupportedVM.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnUnsupportedVM.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMLockingOptionOnUnsupportedVM
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseRTMLockingOptionOnUnsupportedVM
  */
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionWithBiasedLocking.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionWithBiasedLocking.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMLockingOptionWithBiasedLocking
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseRTMLockingOptionWithBiasedLocking
  */
--- a/hotspot/test/compiler/rtm/locking/TestRTMAbortRatio.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestRTMAbortRatio.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMAbortRatio
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestRTMAbortRatio
  */
--- a/hotspot/test/compiler/rtm/locking/TestRTMAbortThreshold.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestRTMAbortThreshold.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMAbortThreshold
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestRTMAbortThreshold
  */
--- a/hotspot/test/compiler/rtm/locking/TestRTMAfterNonRTMDeopt.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestRTMAfterNonRTMDeopt.java	Thu Aug 28 14:53:43 2014 -0700
@@ -32,6 +32,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMAfterNonRTMDeopt
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestRTMAfterNonRTMDeopt
  */
--- a/hotspot/test/compiler/rtm/locking/TestRTMDeoptOnHighAbortRatio.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestRTMDeoptOnHighAbortRatio.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMDeoptOnHighAbortRatio
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestRTMDeoptOnHighAbortRatio
  */
--- a/hotspot/test/compiler/rtm/locking/TestRTMDeoptOnLowAbortRatio.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestRTMDeoptOnLowAbortRatio.java	Thu Aug 28 14:53:43 2014 -0700
@@ -29,6 +29,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMDeoptOnLowAbortRatio
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestRTMDeoptOnLowAbortRatio
  */
--- a/hotspot/test/compiler/rtm/locking/TestRTMLockingCalculationDelay.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestRTMLockingCalculationDelay.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMLockingCalculationDelay
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestRTMLockingCalculationDelay
  */
--- a/hotspot/test/compiler/rtm/locking/TestRTMLockingThreshold.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestRTMLockingThreshold.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMLockingThreshold
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestRTMLockingThreshold
  */
--- a/hotspot/test/compiler/rtm/locking/TestRTMRetryCount.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestRTMRetryCount.java	Thu Aug 28 14:53:43 2014 -0700
@@ -29,6 +29,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMRetryCount
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestRTMRetryCount
  */
--- a/hotspot/test/compiler/rtm/locking/TestRTMSpinLoopCount.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestRTMSpinLoopCount.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMSpinLoopCount
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestRTMSpinLoopCount
  */
--- a/hotspot/test/compiler/rtm/locking/TestRTMTotalCountIncrRate.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestRTMTotalCountIncrRate.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestRTMTotalCountIncrRate
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestRTMTotalCountIncrRate
  */
--- a/hotspot/test/compiler/rtm/locking/TestUseRTMAfterLockInflation.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestUseRTMAfterLockInflation.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMAfterLockInflation
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseRTMAfterLockInflation
  */
--- a/hotspot/test/compiler/rtm/locking/TestUseRTMDeopt.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestUseRTMDeopt.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMDeopt
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseRTMDeopt
  */
--- a/hotspot/test/compiler/rtm/locking/TestUseRTMForInflatedLocks.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestUseRTMForInflatedLocks.java	Thu Aug 28 14:53:43 2014 -0700
@@ -29,6 +29,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMForInflatedLocks
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseRTMForInflatedLocks
  */
--- a/hotspot/test/compiler/rtm/locking/TestUseRTMForStackLocks.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestUseRTMForStackLocks.java	Thu Aug 28 14:53:43 2014 -0700
@@ -29,6 +29,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMForStackLocks
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseRTMForStackLocks
  */
--- a/hotspot/test/compiler/rtm/locking/TestUseRTMXendForLockBusy.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/locking/TestUseRTMXendForLockBusy.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMXendForLockBusy
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseRTMXendForLockBusy
  */
--- a/hotspot/test/compiler/rtm/method_options/TestNoRTMLockElidingOption.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/method_options/TestNoRTMLockElidingOption.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestNoRTMLockElidingOption
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestNoRTMLockElidingOption
  */
--- a/hotspot/test/compiler/rtm/method_options/TestUseRTMLockElidingOption.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/method_options/TestUseRTMLockElidingOption.java	Thu Aug 28 14:53:43 2014 -0700
@@ -31,6 +31,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMLockElidingOption
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestUseRTMLockElidingOption
  */
--- a/hotspot/test/compiler/rtm/print/TestPrintPreciseRTMLockingStatistics.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/rtm/print/TestPrintPreciseRTMLockingStatistics.java	Thu Aug 28 14:53:43 2014 -0700
@@ -32,6 +32,7 @@
  * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestPrintPreciseRTMLockingStatistics
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI TestPrintPreciseRTMLockingStatistics
  */
--- a/hotspot/test/compiler/tiered/NonTieredLevelsTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/tiered/NonTieredLevelsTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -29,6 +29,7 @@
  * @ignore 8046268
  * @build NonTieredLevelsTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:-TieredCompilation
  *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:CompileCommand=compileonly,SimpleTestCase$Helper::*
--- a/hotspot/test/compiler/tiered/TieredLevelsTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/tiered/TieredLevelsTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @ignore 8046268
  * @build TieredLevelsTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+TieredCompilation
  *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:CompileCommand=compileonly,SimpleTestCase$Helper::*
--- a/hotspot/test/compiler/types/correctness/CorrectnessTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/types/correctness/CorrectnessTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -29,6 +29,7 @@
  *          execution/MethodHandleDelegate.java
  * @build CorrectnessTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions
  *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
  *                   -XX:TypeProfileLevel=222 -XX:+UseTypeSpeculation
--- a/hotspot/test/compiler/types/correctness/OffTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/types/correctness/OffTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @build CorrectnessTest
  * @build OffTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/timeout=1200 OffTest
  */
 
--- a/hotspot/test/compiler/whitebox/ClearMethodStateTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/whitebox/ClearMethodStateTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -30,6 +30,7 @@
  * @ignore 8046268
  * @build ClearMethodStateTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* ClearMethodStateTest
  * @summary testing of WB::clearMethodState()
  * @author igor.ignatyev@oracle.com
--- a/hotspot/test/compiler/whitebox/DeoptimizeAllTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/whitebox/DeoptimizeAllTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -28,6 +28,7 @@
  * @ignore 8046268
  * @build DeoptimizeAllTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* DeoptimizeAllTest
  * @summary testing of WB::deoptimizeAll()
  * @author igor.ignatyev@oracle.com
--- a/hotspot/test/compiler/whitebox/DeoptimizeMethodTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/whitebox/DeoptimizeMethodTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -28,6 +28,7 @@
  * @ignore 8046268
  * @build DeoptimizeMethodTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* DeoptimizeMethodTest
  * @summary testing of WB::deoptimizeMethod()
  * @author igor.ignatyev@oracle.com
--- a/hotspot/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -28,6 +28,7 @@
  * @ignore 8046268
  * @build EnqueueMethodForCompilationTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm/timeout=600 -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* EnqueueMethodForCompilationTest
  * @summary testing of WB::enqueueMethodForCompilation()
  * @author igor.ignatyev@oracle.com
--- a/hotspot/test/compiler/whitebox/GetNMethodTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/whitebox/GetNMethodTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -31,6 +31,7 @@
  * @ignore 8046268
  * @build GetNMethodTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* GetNMethodTest
  * @summary testing of WB::getNMethod()
  * @author igor.ignatyev@oracle.com
--- a/hotspot/test/compiler/whitebox/IsMethodCompilableTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/whitebox/IsMethodCompilableTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /testlibrary/com/oracle/java/testlibrary
  * @build IsMethodCompilableTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
  * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
  * @summary testing of WB::isMethodCompilable()
--- a/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -28,6 +28,7 @@
  * @ignore 8046268
  * @build MakeMethodNotCompilableTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* MakeMethodNotCompilableTest
  * @summary testing of WB::makeMethodNotCompilable()
  * @author igor.ignatyev@oracle.com
--- a/hotspot/test/compiler/whitebox/SetDontInlineMethodTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/whitebox/SetDontInlineMethodTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build SetDontInlineMethodTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* SetDontInlineMethodTest
  * @summary testing of WB::testSetDontInlineMethod()
  * @author igor.ignatyev@oracle.com
--- a/hotspot/test/compiler/whitebox/SetForceInlineMethodTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/compiler/whitebox/SetForceInlineMethodTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build SetForceInlineMethodTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* SetForceInlineMethodTest
  * @summary testing of WB::testSetForceInlineMethod()
  * @author igor.ignatyev@oracle.com
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestArrayAllocatorMallocLimit.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestArrayAllocatorMallocLimit
+ * @summary Sanity check that the ArrayAllocatorMallocLimit flag can be set.
+ * The test helps verifying that size_t flags can be set/read.
+ * @bug 8054823
+ * @key gc
+ * @library /testlibrary
+ * @run driver TestArrayAllocatorMallocLimit
+ */
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import java.math.BigInteger;
+
+public class TestArrayAllocatorMallocLimit {
+  public static void main(String [] args) throws Exception {
+    testDefaultValue();
+    testSetValue();
+  }
+
+  private static final String flagName = "ArrayAllocatorMallocLimit";
+
+  //     size_t ArrayAllocatorMallocLimit                 = 18446744073709551615{experimental}
+  private static final String printFlagsFinalPattern = " *size_t *" + flagName + " *:?= *(\\d+) *\\{experimental\\} *";
+
+  public static void testDefaultValue()  throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+      "-XX:+UnlockExperimentalVMOptions", "-XX:+PrintFlagsFinal", "-version");
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    String value = output.firstMatch(printFlagsFinalPattern, 1);
+
+    try {
+      Asserts.assertNotNull(value, "Couldn't find size_t flag " + flagName);
+
+      // A size_t value does not always fit in a long, so Long.parseLong
+      // may fail; use BigInteger instead.
+      BigInteger biValue = new BigInteger(value);
+
+      // Sanity check that we got a non-zero value.
+      Asserts.assertNotEquals(biValue, BigInteger.ZERO);
+
+      output.shouldHaveExitValue(0);
+    } catch (Exception e) {
+      System.err.println(output.getOutput());
+      throw e;
+    }
+  }
+
+  public static void testSetValue() throws Exception {
+    long flagValue = 2048;
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+      "-XX:+UnlockExperimentalVMOptions", "-XX:" + flagName + "=" + flagValue, "-XX:+PrintFlagsFinal", "-version");
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    String value = output.firstMatch(printFlagsFinalPattern, 1);
+
+    try {
+      Asserts.assertNotNull("Couldn't find size_t flag " + flagName);
+
+      long longValue = Long.parseLong(value);
+
+      Asserts.assertEquals(longValue, flagValue);
+
+      output.shouldHaveExitValue(0);
+    } catch (Exception e) {
+      System.err.println(output.getOutput());
+      throw e;
+    }
+  }
+
+}
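The reason the test above reaches for BigInteger: the default value quoted in
its comment, 18446744073709551615, is 2^64 - 1, which overflows long, so
Long.parseLong would throw NumberFormatException. A self-contained sketch of
the same match-and-parse step, reusing the test's pattern against the sample
line from its comment (ParseSizeTFlag is a hypothetical name):

    import java.math.BigInteger;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ParseSizeTFlag {
        public static void main(String[] args) {
            // Sample -XX:+PrintFlagsFinal line, as quoted in the test's comment.
            String line = "     size_t ArrayAllocatorMallocLimit"
                    + "                 = 18446744073709551615{experimental}";
            Matcher m = Pattern
                    .compile(" *size_t *ArrayAllocatorMallocLimit *:?= *(\\d+) *\\{experimental\\} *")
                    .matcher(line);
            if (m.find()) {
                // 2^64-1 does not fit in a long; BigInteger parses it fine.
                BigInteger value = new BigInteger(m.group(1));
                System.out.println(value);
            }
        }
    }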
--- a/hotspot/test/gc/arguments/TestCMSHeapSizeFlags.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/gc/arguments/TestCMSHeapSizeFlags.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestCMSHeapSizeFlags TestMaxHeapSizeTools
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm TestCMSHeapSizeFlags
  * @author thomas.schatzl@oracle.com
  */
--- a/hotspot/test/gc/arguments/TestG1HeapSizeFlags.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/gc/arguments/TestG1HeapSizeFlags.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestG1HeapSizeFlags TestMaxHeapSizeTools
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm TestG1HeapSizeFlags
  * @author thomas.schatzl@oracle.com
  */
--- a/hotspot/test/gc/arguments/TestMinInitialErgonomics.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/gc/arguments/TestMinInitialErgonomics.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestMinInitialErgonomics TestMaxHeapSizeTools
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm TestMinInitialErgonomics
  * @author thomas.schatzl@oracle.com
  */
--- a/hotspot/test/gc/arguments/TestParallelHeapSizeFlags.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/gc/arguments/TestParallelHeapSizeFlags.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestParallelHeapSizeFlags TestMaxHeapSizeTools
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm TestParallelHeapSizeFlags
  * @author thomas.schatzl@oracle.com
  */
--- a/hotspot/test/gc/arguments/TestSerialHeapSizeFlags.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/gc/arguments/TestSerialHeapSizeFlags.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestSerialHeapSizeFlags TestMaxHeapSizeTools
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm TestSerialHeapSizeFlags
  * @author thomas.schatzl@oracle.com
  */
--- a/hotspot/test/gc/arguments/TestUseCompressedOopsErgo.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/gc/arguments/TestUseCompressedOopsErgo.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestUseCompressedOopsErgo TestUseCompressedOopsErgoTools
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm TestUseCompressedOopsErgo -XX:+UseG1GC
  * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC
  * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC -XX:-UseParallelOldGC
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/class_unloading/AllocateBeyondMetaspaceSize.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+
+class AllocateBeyondMetaspaceSize {
+  public static Object dummy;
+
+  public static void main(String [] args) {
+    if (args.length != 2) {
+      throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
+    }
+
+    long metaspaceSize = Long.parseLong(args[0]);
+    long youngGenSize = Long.parseLong(args[1]);
+
+    run(metaspaceSize, youngGenSize);
+  }
+
+  private static void run(long metaspaceSize, long youngGenSize) {
+    WhiteBox wb = WhiteBox.getWhiteBox();
+
+    long allocationBeyondMetaspaceSize  = metaspaceSize * 2;
+    long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+    triggerYoungGC(youngGenSize);
+
+    wb.freeMetaspace(null, metaspace, allocationBeyondMetaspaceSize);
+  }
+
+  private static void triggerYoungGC(long youngGenSize) {
+    long approxAllocSize = 32 * 1024;
+    long numAllocations  = 2 * youngGenSize / approxAllocSize;
+
+    for (long i = 0; i < numAllocations; i++) {
+      dummy = new byte[(int)approxAllocSize];
+    }
+  }
+}
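AllocateBeyondMetaspaceSize above is only a helper main class; the two tests
that follow spawn it in a child VM. A minimal sketch of such a launch, assuming
the testlibrary classes are on the classpath and the WhiteBox classes have
already been installed in the current directory (RunAllocatorByHand is a
hypothetical name):

    import com.oracle.java.testlibrary.OutputAnalyzer;
    import com.oracle.java.testlibrary.ProcessTools;

    public class RunAllocatorByHand {
        public static void main(String[] args) throws Exception {
            long size = 32 * 1024 * 1024;
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                "-Xbootclasspath/a:.",            // WhiteBox must be on the boot class path
                "-XX:+UnlockDiagnosticVMOptions", // WhiteBoxAPI is a diagnostic flag
                "-XX:+WhiteBoxAPI",
                "-XX:MetaspaceSize=" + size,
                "AllocateBeyondMetaspaceSize",
                "" + size,                        // <MetaspaceSize> argument
                "" + size);                       // <YoungGenSize> argument
            new OutputAnalyzer(pb.start()).shouldHaveExitValue(0);
        }
    }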
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key gc
+ * @bug 8049831
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestCMSClassUnloadingEnabledHWM AllocateBeyondMetaspaceSize
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run driver TestCMSClassUnloadingEnabledHWM
+ * @summary Test that -XX:-CMSClassUnloadingEnabled will trigger a Full GC when more than MetaspaceSize metadata is allocated.
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+
+public class TestCMSClassUnloadingEnabledHWM {
+  private static long MetaspaceSize = 32 * 1024 * 1024;
+  private static long YoungGenSize  = 32 * 1024 * 1024;
+
+  private static OutputAnalyzer run(boolean enableUnloading) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+      "-Xbootclasspath/a:.",
+      "-XX:+WhiteBoxAPI",
+      "-XX:MetaspaceSize=" + MetaspaceSize,
+      "-Xmn" + YoungGenSize,
+      "-XX:+UseConcMarkSweepGC",
+      "-XX:" + (enableUnloading ? "+" : "-") + "CMSClassUnloadingEnabled",
+      "-XX:+PrintHeapAtGC",
+      "-XX:+PrintGCDetails",
+      "AllocateBeyondMetaspaceSize",
+      "" + MetaspaceSize,
+      "" + YoungGenSize);
+    return new OutputAnalyzer(pb.start());
+  }
+
+  public static OutputAnalyzer runWithCMSClassUnloading() throws Exception {
+    return run(true);
+  }
+
+  public static OutputAnalyzer runWithoutCMSClassUnloading() throws Exception {
+    return run(false);
+  }
+
+  public static void testWithoutCMSClassUnloading() throws Exception {
+    // -XX:-CMSClassUnloadingEnabled is used, so we expect a full GC instead of a concurrent cycle.
+    OutputAnalyzer out = runWithoutCMSClassUnloading();
+
+    out.shouldMatch(".*Full GC.*");
+    out.shouldNotMatch(".*CMS Initial Mark.*");
+  }
+
+  public static void testWithCMSClassUnloading() throws Exception {
+    // -XX:+CMSClassUnloadingEnabled is used, so we expect a concurrent cycle instead of a full GC.
+    OutputAnalyzer out = runWithCMSClassUnloading();
+
+    out.shouldMatch(".*CMS Initial Mark.*");
+    out.shouldNotMatch(".*Full GC.*");
+  }
+
+  public static void main(String args[]) throws Exception {
+    testWithCMSClassUnloading();
+    testWithoutCMSClassUnloading();
+  }
+}
+
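The G1 test below repeats the same driver shape; the only moving part in both is
turning a boolean into the +/- form of an -XX flag. That one-liner, pulled out
for clarity (FlagToggle is a hypothetical name):

    public class FlagToggle {
        // Builds -XX:+Name or -XX:-Name from a boolean, as both tests do inline.
        static String xxFlag(String name, boolean on) {
            return "-XX:" + (on ? "+" : "-") + name;
        }

        public static void main(String[] args) {
            System.out.println(xxFlag("CMSClassUnloadingEnabled", true));
            System.out.println(xxFlag("ClassUnloadingWithConcurrentMark", false));
        }
    }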
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/class_unloading/TestG1ClassUnloadingHWM.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key gc
+ * @bug 8049831
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestG1ClassUnloadingHWM AllocateBeyondMetaspaceSize
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run driver TestG1ClassUnloadingHWM
+ * @summary Test that -XX:-ClassUnloadingWithConcurrentMark will trigger a Full GC when more than MetaspaceSize metadata is allocated.
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+
+public class TestG1ClassUnloadingHWM {
+  private static long MetaspaceSize = 32 * 1024 * 1024;
+  private static long YoungGenSize  = 32 * 1024 * 1024;
+
+  private static OutputAnalyzer run(boolean enableUnloading) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+      "-Xbootclasspath/a:.",
+      "-XX:+WhiteBoxAPI",
+      "-XX:MetaspaceSize=" + MetaspaceSize,
+      "-Xmn" + YoungGenSize,
+      "-XX:+UseG1GC",
+      "-XX:" + (enableUnloading ? "+" : "-") + "ClassUnloadingWithConcurrentMark",
+      "-XX:+PrintHeapAtGC",
+      "-XX:+PrintGCDetails",
+      "AllocateBeyondMetaspaceSize",
+      "" + MetaspaceSize,
+      "" + YoungGenSize);
+    return new OutputAnalyzer(pb.start());
+  }
+
+  public static OutputAnalyzer runWithG1ClassUnloading() throws Exception {
+    return run(true);
+  }
+
+  public static OutputAnalyzer runWithoutG1ClassUnloading() throws Exception {
+    return run(false);
+  }
+
+  public static void testWithoutG1ClassUnloading() throws Exception {
+    // -XX:-ClassUnloadingWithConcurrentMark is used, so we expect a full GC instead of a concurrent cycle.
+    OutputAnalyzer out = runWithoutG1ClassUnloading();
+
+    out.shouldMatch(".*Full GC.*");
+    out.shouldNotMatch(".*initial-mark.*");
+  }
+
+  public static void testWithG1ClassUnloading() throws Exception {
+    // -XX:+ClassUnloadingWithConcurrentMark is used, so we expect a concurrent cycle instead of a full GC.
+    OutputAnalyzer out = runWithG1ClassUnloading();
+
+    out.shouldMatch(".*initial-mark.*");
+    out.shouldNotMatch(".*Full GC.*");
+  }
+
+  public static void main(String args[]) throws Exception {
+    testWithG1ClassUnloading();
+    testWithoutG1ClassUnloading();
+  }
+}
+
--- a/hotspot/test/gc/g1/TestDeferredRSUpdate.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/gc/g1/TestDeferredRSUpdate.java	Thu Aug 28 14:53:43 2014 -0700
@@ -23,7 +23,7 @@
 
 /*
  * @test TestDeferredRSUpdate
- * @bug 8040977
+ * @bug 8040977 8052170
  * @summary Ensure that running with -XX:-G1DeferredRSUpdate does not crash the VM
  * @key gc
  * @library /testlibrary
@@ -38,6 +38,7 @@
 
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
                                                               "-Xmx10M",
+                                                              "-XX:+PrintGCDetails",
                                                               // G1DeferredRSUpdate is a develop option, but we cannot limit execution of this test to only debug VMs.
                                                               "-XX:+IgnoreUnrecognizedVMOptions",
                                                               "-XX:-G1DeferredRSUpdate",
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestEagerReclaimHumongousRegions.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestEagerReclaimHumongousRegions
+ * @bug 8027959
+ * @summary Test to make sure that eager reclaim of humongous objects work. We simply try to fill
+ * up the heap with humongous objects that should be eagerly reclaimable to avoid Full GC.
+ * @key gc
+ * @library /testlibrary
+ */
+
+import java.util.regex.Pattern;
+import java.util.regex.Matcher;
+import java.util.LinkedList;
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.Asserts;
+
+class ReclaimRegionFast {
+    public static final int M = 1024*1024;
+
+    public static LinkedList<Object> garbageList = new LinkedList<Object>();
+
+    public static void genGarbage() {
+        for (int i = 0; i < 32*1024; i++) {
+            garbageList.add(new int[100]);
+        }
+        garbageList.clear();
+    }
+
+    // A large object referenced by a static.
+    static int[] filler = new int[10 * M];
+
+    public static void main(String[] args) {
+
+        int[] large = new int[M];
+
+        Object ref_from_stack = large;
+
+        for (int i = 0; i < 100; i++) {
+            // A large object that will be reclaimed eagerly.
+            large = new int[6*M];
+            genGarbage();
+            // Make sure that the compiler cannot completely remove
+            // the allocation of the large object before this point.
+            System.out.println(large);
+        }
+
+        // Keep the reference to the first object alive.
+        System.out.println(ref_from_stack);
+    }
+}
+
+public class TestEagerReclaimHumongousRegions {
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UseG1GC",
+            "-Xms128M",
+            "-Xmx128M",
+            "-Xmn16M",
+            "-XX:+PrintGC",
+            ReclaimRegionFast.class.getName());
+
+        Pattern p = Pattern.compile("Full GC");
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+        int found = 0;
+        Matcher m = p.matcher(output.getStdout());
+        while (m.find()) { found++; }
+        System.out.println("Issued " + found + " Full GCs");
+        Asserts.assertLT(found, 10, "Found that " + found + " Full GCs were issued. This is larger than the bound. Eager reclaim does not seem to work at all");
+
+        output.shouldHaveExitValue(0);
+    }
+}
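A note on the sizing in ReclaimRegionFast: G1 treats an object as humongous once it occupies at least half a region, and the int[6*M] arrays are roughly 24MB, which clears that threshold for every legal region size (1MB to 32MB). A back-of-envelope check, not part of the test:

    // Rough humongous check: G1 classifies objects of at least half a region as humongous.
    static boolean isHumongous(long objectBytes, long regionBytes) {
        return objectBytes >= regionBytes / 2;
    }
    // 6*M ints ~= 24MB of payload; even the 32MB maximum region size gives a 16MB threshold.
    // isHumongous(6L * 1024 * 1024 * 4, 32L * 1024 * 1024) -> true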
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestEagerReclaimHumongousRegions2.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestEagerReclaimHumongousRegions2
+ * @bug 8051973
+ * @summary Test to make sure that eager reclaim of humongous objects correctly clears
+ * mark bitmaps at reclaim.
+ * @key gc
+ * @library /testlibrary
+ */
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.Random;
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+// An object that has a few references to other instances to slow down marking.
+class ObjectWithSomeRefs {
+    public ObjectWithSomeRefs other1;
+    public ObjectWithSomeRefs other2;
+    public ObjectWithSomeRefs other3;
+    public ObjectWithSomeRefs other4;
+}
+
+class ReclaimRegionFast {
+    public static final int M = 1024*1024;
+
+    public static LinkedList<Object> garbageList = new LinkedList<Object>();
+
+    public static void genGarbage(Object large) {
+        for (int i = 0; i < 64*1024; i++) {
+            Object[] garbage = new Object[50];
+            garbage[0] = large;
+            garbageList.add(garbage);
+        }
+        garbageList.clear();
+    }
+
+    public static ArrayList<ObjectWithSomeRefs> longList = new ArrayList<ObjectWithSomeRefs>();
+
+    public static void main(String[] args) {
+
+        for (int i = 0; i < 16*1024; i++) {
+             longList.add(new ObjectWithSomeRefs());
+        }
+
+        Random rnd = new Random();
+        for (int i = 0; i < longList.size(); i++) {
+             int len = longList.size();
+             longList.get(i).other1 = longList.get(rnd.nextInt(len));
+             longList.get(i).other2 = longList.get(rnd.nextInt(len));
+             longList.get(i).other3 = longList.get(rnd.nextInt(len));
+             longList.get(i).other4 = longList.get(rnd.nextInt(len));
+        }
+
+        int[] large1 = new int[M];
+        int[] large2 = null;
+        int[] large3 = null;
+        int[] large4 = null;
+
+        Object ref_from_stack = large1;
+
+        for (int i = 0; i < 20; i++) {
+            // A set of large objects that will be reclaimed eagerly - and hopefully marked.
+            large1 = new int[M - 20];
+            large2 = new int[M - 20];
+            large3 = new int[M - 20];
+            large4 = new int[M - 20];
+            genGarbage(large1);
+            // Make sure that the compiler cannot completely remove
+            // the allocation of the large objects before this point.
+            System.out.println(large1 + " " + large2 + " " + large3 + " " + large4);
+        }
+
+        // Keep the reference to the first object alive.
+        System.out.println(ref_from_stack);
+    }
+}
+
+public class TestEagerReclaimHumongousRegions2 {
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UseG1GC",
+            "-Xms128M",
+            "-Xmx128M",
+            "-Xmn2M",
+            "-XX:G1HeapRegionSize=1M",
+            "-XX:InitiatingHeapOccupancyPercent=0", // Want to have as much as possible initial marks.
+            "-XX:+PrintGC",
+            "-XX:+VerifyAfterGC",
+            "-XX:ConcGCThreads=1", // Want to make marking as slow as possible.
+            "-XX:+IgnoreUnrecognizedVMOptions", // G1VerifyBitmaps is develop only.
+            "-XX:+G1VerifyBitmaps",
+            ReclaimRegionFast.class.getName());
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+    }
+}
+
--- a/hotspot/test/gc/g1/TestGCLogMessages.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/gc/g1/TestGCLogMessages.java	Thu Aug 28 14:53:43 2014 -0700
@@ -23,7 +23,7 @@
 
 /*
  * @test TestGCLogMessages
- * @bug 8035406 8027295 8035398 8019342
+ * @bug 8035406 8027295 8035398 8019342 8027959
  * @summary Ensure that the PrintGCDetails output for a minor GC with G1
  * includes the expected necessary messages.
  * @key gc
@@ -54,6 +54,7 @@
     output.shouldNotContain("[String Dedup Fixup");
     output.shouldNotContain("[Young Free CSet");
     output.shouldNotContain("[Non-Young Free CSet");
+    output.shouldNotContain("[Humongous Reclaim");
     output.shouldHaveExitValue(0);
 
     pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
@@ -71,6 +72,10 @@
     output.shouldContain("[String Dedup Fixup");
     output.shouldNotContain("[Young Free CSet");
     output.shouldNotContain("[Non-Young Free CSet");
+    output.shouldContain("[Humongous Reclaim");
+    output.shouldNotContain("[Humongous Total");
+    output.shouldNotContain("[Humongous Candidate");
+    output.shouldNotContain("[Humongous Reclaimed");
     output.shouldHaveExitValue(0);
 
     pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
@@ -90,6 +95,10 @@
     output.shouldContain("[String Dedup Fixup");
     output.shouldContain("[Young Free CSet");
     output.shouldContain("[Non-Young Free CSet");
+    output.shouldContain("[Humongous Reclaim");
+    output.shouldContain("[Humongous Total");
+    output.shouldContain("[Humongous Candidate");
+    output.shouldContain("[Humongous Reclaimed");
     output.shouldHaveExitValue(0);
   }
 
--- a/hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build TestHumongousCodeCacheRoots
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @summary Humongous objects may have references from the code cache
  * @run main TestHumongousCodeCacheRoots
 */
--- a/hotspot/test/runtime/6626217/Test6626217.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/6626217/Test6626217.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -69,7 +69,7 @@
 ${MV} many_loader.impl1 many_loader.class
 ${RM} many_loader.java
 
-${JAVA} ${TESTVMOPTS} -Xverify -Xint -cp . bug_21227 >test.out 2>&1
+${JAVA} ${TESTOPTS} -Xverify -Xint -cp . bug_21227 >test.out 2>&1
 grep "loader constraint" test.out
 exit $?
 
--- a/hotspot/test/runtime/6888954/vmerrors.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/6888954/vmerrors.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -1,4 +1,4 @@
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,16 @@
 # export TESTJAVA TESTVMOPTS
 # sh test/runtime/6888954/vmerrors.sh
 
+if [ "${TESTSRC}" = "" ]
+then
+  TESTSRC=${PWD}
+  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
+fi
+echo "TESTSRC=${TESTSRC}"
+
+## Adding common setup variables for running shell tests.
+. ${TESTSRC}/../../test_env.sh
+
 ulimit -c 0 # no core files
 
 i=1
@@ -84,7 +94,7 @@
     i2=$i
     [ $i -lt 10 ] && i2=0$i
 
-    "$TESTJAVA/bin/java" $TESTVMOPTS -XX:+IgnoreUnrecognizedVMOptions \
+    "$TESTJAVA/bin/java" $TESTOPTS -XX:+IgnoreUnrecognizedVMOptions \
         -XX:-TransmitErrorReport -XX:-CreateMinidumpOnCrash \
         -XX:ErrorHandlerTest=${i} -version > ${i2}.out 2>&1
 
--- a/hotspot/test/runtime/7107135/Test7107135.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/7107135/Test7107135.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -25,7 +25,6 @@
 #
 
 ##
-## @ignore 8025519
 ## @test Test7107135.sh
 ## @bug 7107135
 ## @bug 8021296
--- a/hotspot/test/runtime/7116786/Test7116786.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/7116786/Test7116786.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -147,7 +147,8 @@
                  "no stackmap frame at jump location or bad jump",
                  "Inconsistent stackmap frames at branch target "),
 
-        new Case("case15", "stackMapTable.cpp", true, "check_new_object",
+        /* Backward jump with uninit is allowed starting with JDK 8 */
+        new Case("case15", "stackMapTable.cpp", false, "check_new_object",
                  "backward jump with uninit",
                  "Uninitialized object exists on backward branch "),
 
--- a/hotspot/test/runtime/7162488/Test7162488.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/7162488/Test7162488.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 #
-#  Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+#  Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
 #  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 #  This code is free software; you can redistribute it and/or modify it
@@ -45,14 +45,14 @@
 #
 OPTION=this_is_not_an_option
 
-${JAVA} ${TESTVMOPTS} -showversion -XX:${OPTION} 2>&1 | grep "Unrecognized VM option" 
+${JAVA} -showversion -XX:${OPTION} 2>&1 | grep "Unrecognized VM option" 
 if [ "$?" != "0" ]
 then
   printf "FAILED: option not flagged as unrecognized.\n"
   exit 1
 fi
 
-${JAVA} ${TESTVMOPTS} -showversion -XX:${OPTION} 2>&1 | grep ${OPTION}
+${JAVA} -showversion -XX:${OPTION} 2>&1 | grep ${OPTION}
 if [ "$?" != "0" ]
 then
   printf "FAILED: bad option not named as being bad.\n"
--- a/hotspot/test/runtime/ClassUnload/KeepAliveClass.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/ClassUnload/KeepAliveClass.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
  * @build KeepAliveClass test.Empty
  * @build ClassUnloadCommon
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xmn8m -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI KeepAliveClass
  */
 
--- a/hotspot/test/runtime/ClassUnload/KeepAliveClassLoader.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/ClassUnload/KeepAliveClassLoader.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
  * @build KeepAliveClassLoader test.Empty
  * @build ClassUnloadCommon
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xmn8m -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI KeepAliveClassLoader
  */
 
--- a/hotspot/test/runtime/ClassUnload/KeepAliveObject.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/ClassUnload/KeepAliveObject.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
  * @build KeepAliveObject test.Empty
  * @build ClassUnloadCommon
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xmn8m -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI KeepAliveObject
  */
 
--- a/hotspot/test/runtime/ClassUnload/KeepAliveSoftReference.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/ClassUnload/KeepAliveSoftReference.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
  * @build KeepAliveSoftReference test.Empty
  * @build ClassUnloadCommon
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xmn8m -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI KeepAliveSoftReference
  */
 
--- a/hotspot/test/runtime/ClassUnload/UnloadTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/ClassUnload/UnloadTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
  * @build ClassUnloadCommon test.Empty
  * @build UnloadTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -Xmn8m -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI UnloadTest
  */
 import sun.hotspot.WhiteBox;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/AutoshutdownNMT.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key nmt
+ * @summary Test for a deprecation message if -XX:-AutoShutdownNMT is specified
+ * @library /testlibrary
+ * @ignore
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class AutoshutdownNMT {
+
+    public static void main(String args[]) throws Exception {
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                "-XX:NativeMemoryTracking=detail",
+                "-XX:-AutoShutdownNMT",
+                "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ignoring option AutoShutdownNMT");
+    }
+}
--- a/hotspot/test/runtime/NMT/BaselineWithParameter.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/BaselineWithParameter.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @key nmt jcmd regression
  * @summary Regression test for invoking a jcmd with baseline=false, result was that the target VM crashed
  * @library /testlibrary
+ * @ignore
  * @run main/othervm -XX:NativeMemoryTracking=detail BaselineWithParameter
  */
 
--- a/hotspot/test/runtime/NMT/CommandLineDetail.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/CommandLineDetail.java	Thu Aug 28 14:53:43 2014 -0700
@@ -26,6 +26,7 @@
  * @key nmt
  * @summary Running with NMT detail should not result in an error
  * @library /testlibrary
+ * @ignore
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/CommandLineEmptyArgument.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/CommandLineEmptyArgument.java	Thu Aug 28 14:53:43 2014 -0700
@@ -26,6 +26,7 @@
  * @key nmt
  * @summary Empty argument to NMT should result in an informative error message
  * @library /testlibrary
+ * @ignore
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/CommandLineInvalidArgument.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/CommandLineInvalidArgument.java	Thu Aug 28 14:53:43 2014 -0700
@@ -26,6 +26,7 @@
  * @key nmt
  * @summary Invalid argument to NMT should result in an informative error message
  * @library /testlibrary
+ * @ignore
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/CommandLineSummary.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/CommandLineSummary.java	Thu Aug 28 14:53:43 2014 -0700
@@ -26,6 +26,7 @@
  * @key nmt
  * @summary Running with NMT summary should not result in an error
  * @library /testlibrary
+ * @ignore
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/CommandLineTurnOffNMT.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/CommandLineTurnOffNMT.java	Thu Aug 28 14:53:43 2014 -0700
@@ -26,6 +26,7 @@
  * @key nmt
  * @summary Turning off NMT should not result in an error
  * @library /testlibrary
+ * @ignore
  */
 
 import com.oracle.java.testlibrary.*;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/JcmdBaselineDetail.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key nmt jcmd
+ * @summary Verify that jcmd correctly reports that baseline succeeds with NMT detail tracking enabled.
+ * @library /testlibrary
+ * @ignore
+ * @run main/othervm -XX:NativeMemoryTracking=detail JcmdBaselineDetail
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class JcmdBaselineDetail {
+
+    public static void main(String args[]) throws Exception {
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        OutputAnalyzer output;
+
+        ProcessBuilder pb = new ProcessBuilder();
+
+        // Run 'jcmd <pid> VM.native_memory baseline=true'
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "baseline=true"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Baseline succeeded");
+    }
+}
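The NMT jcmd tests in this patch all repeat the same invoke-and-scrape sequence: build a jcmd command around VM.native_memory, start it, and assert on the output. An illustrative helper, not part of the patch, that would collapse the repetition using only testlibrary calls already imported above:

    // Hypothetical refactoring sketch: run 'jcmd <pid> VM.native_memory <args...>'.
    static OutputAnalyzer jcmd(String pid, String... args) throws Exception {
        String[] cmd = new String[args.length + 3];
        cmd[0] = JDKToolFinder.getJDKTool("jcmd");
        cmd[1] = pid;
        cmd[2] = "VM.native_memory";
        System.arraycopy(args, 0, cmd, 3, args.length);
        return new OutputAnalyzer(new ProcessBuilder(cmd).start());
    }
    // Usage: jcmd(pid, "baseline=true").shouldContain("Baseline succeeded");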
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/JcmdDetailDiff.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary run NMT baseline, allocate memory and verify output from detail.diff
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @ignore
+ * @build JcmdDetailDiff
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail JcmdDetailDiff
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class JcmdDetailDiff {
+
+    public static WhiteBox wb = WhiteBox.getWhiteBox();
+
+    public static void main(String args[]) throws Exception {
+        ProcessBuilder pb = new ProcessBuilder();
+        OutputAnalyzer output;
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+
+        long commitSize = 128 * 1024;
+        long reserveSize = 256 * 1024;
+        long addr;
+
+        // Run 'jcmd <pid> VM.native_memory baseline=true'
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "baseline=true"});
+        pb.start().waitFor();
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Baseline succeeded");
+
+        addr = wb.NMTReserveMemory(reserveSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
+        output.shouldContain("WB_NMTReserveMemory");
+
+        wb.NMTCommitMemory(addr, commitSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB +256KB, committed=128KB +128KB)");
+        output.shouldContain("WB_NMTReserveMemory");
+
+        wb.NMTUncommitMemory(addr, commitSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
+        output.shouldContain("WB_NMTReserveMemory");
+
+        wb.NMTReleaseMemory(addr, reserveSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+    }
+}
--- a/hotspot/test/runtime/NMT/JcmdScale.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/JcmdScale.java	Thu Aug 28 14:53:43 2014 -0700
@@ -26,6 +26,7 @@
  * @key nmt jcmd
  * @summary Test the NMT scale parameter
  * @library /testlibrary
+ * @ignore
  * @run main/othervm -XX:NativeMemoryTracking=summary JcmdScale
  */
 
@@ -41,15 +42,15 @@
 
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"});
     output = new OutputAnalyzer(pb.start());
-    output.shouldContain("KB,  committed=");
+    output.shouldContain("KB, committed=");
 
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=MB"});
     output = new OutputAnalyzer(pb.start());
-    output.shouldContain("MB,  committed=");
+    output.shouldContain("MB, committed=");
 
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=GB"});
     output = new OutputAnalyzer(pb.start());
-    output.shouldContain("GB,  committed=");
+    output.shouldContain("GB, committed=");
 
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=apa"});
     output = new OutputAnalyzer(pb.start());
@@ -57,7 +58,7 @@
 
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=GB"});
     output = new OutputAnalyzer(pb.start());
-    output.shouldContain("GB,  committed=");
+    output.shouldContain("GB, committed=");
 
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=apa"});
     output = new OutputAnalyzer(pb.start());
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/JcmdScaleDetail.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key nmt jcmd
+ * @summary Test the NMT scale parameter with detail tracking level
+ * @library /testlibrary
+ * @ignore
+ * @run main/othervm -XX:NativeMemoryTracking=detail JcmdScaleDetail
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class JcmdScaleDetail {
+
+    public static void main(String args[]) throws Exception {
+        ProcessBuilder pb = new ProcessBuilder();
+        OutputAnalyzer output;
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("KB, committed=");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=MB"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("MB, committed=");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=GB"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("GB, committed=");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=apa"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Incorrect scale value: apa");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=GB"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("GB, committed=");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=apa"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Incorrect scale value: apa");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/JcmdSummaryDiff.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary run NMT baseline, allocate memory and verify output from summary.diff
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @build JcmdSummaryDiff
+ * @ignore
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=summary JcmdSummaryDiff
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class JcmdSummaryDiff {
+
+    public static WhiteBox wb = WhiteBox.getWhiteBox();
+
+    public static void main(String args[]) throws Exception {
+        ProcessBuilder pb = new ProcessBuilder();
+        OutputAnalyzer output;
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+
+        long commitSize = 128 * 1024;
+        long reserveSize = 256 * 1024;
+        long addr;
+
+        // Run 'jcmd <pid> VM.native_memory baseline=true'
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "baseline=true"});
+        pb.start().waitFor();
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Baseline succeeded");
+
+        addr = wb.NMTReserveMemory(reserveSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
+
+        wb.NMTCommitMemory(addr, commitSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB +256KB, committed=128KB +128KB)");
+
+        wb.NMTUncommitMemory(addr, commitSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
+
+        wb.NMTReleaseMemory(addr, reserveSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+    }
+}
--- a/hotspot/test/runtime/NMT/JcmdWithNMTDisabled.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/JcmdWithNMTDisabled.java	Thu Aug 28 14:53:43 2014 -0700
@@ -26,6 +26,7 @@
  * @key nmt jcmd
  * @summary Verify that jcmd correctly reports that NMT is not enabled
  * @library /testlibrary
+ * @ignore
  * @run main JcmdWithNMTDisabled 1
  */
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/MallocRoundingReportTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test consistency of NMT by creating allocations of the Test type with various sizes and verifying visibility with jcmd
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @build MallocRoundingReportTest
+ * @ignore
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocRoundingReportTest
+ *
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class MallocRoundingReportTest {
+    private static long K = 1024;
+
+    public static void main(String args[]) throws Exception {
+        OutputAnalyzer output;
+        WhiteBox wb = WhiteBox.getWhiteBox();
+
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        long[] additionalBytes = {0, 1, 512, 650};
+        long[] kByteSize = {1024, 2048};
+        long mallocd_total = 0;
+        for ( int i = 0; i < kByteSize.length; i++)
+        {
+            for (int j = 0; j < (additionalBytes.length); j++) {
+                long curKB = kByteSize[i] + additionalBytes[j];
+                // round up/down to the nearest KB to match NMT reporting
+                long numKB = (curKB % kByteSize[i] >= 512) ? ((curKB / K) + 1) : curKB / K;
+                // Use WB API to alloc and free with the mtTest type
+                mallocd_total = wb.NMTMalloc(curKB);
+                // Run 'jcmd <pid> VM.native_memory summary', check for expected output
+                // NMT does not track memory allocations less than 1KB, and rounds to the nearest KB
+                String expectedOut = ("Test (reserved=" + numKB + "KB, committed=" + numKB + "KB)");
+
+                pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary" });
+                output = new OutputAnalyzer(pb.start());
+                output.shouldContain(expectedOut);
+
+                wb.NMTFree(mallocd_total);
+                // Run 'jcmd <pid> VM.native_memory summary', check for expected output
+                pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary" });
+                output = new OutputAnalyzer(pb.start());
+                output.shouldNotContain("Test (reserved=");
+            }
+        }
+    }
+}
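For the sizes exercised here, the numKB expression in the loop reduces to plain nearest-KB rounding, since every additional-bytes value stays below 1KB. A one-line restatement with worked values:

    // Equivalent to the numKB computation above: round a byte count to the nearest KB.
    static long roundToNearestKB(long bytes) {
        return (bytes + 512) / 1024;
    }
    // roundToNearestKB(2048 + 650) == 3   (2698 bytes -> 3KB, remainder 650 >= 512)
    // roundToNearestKB(2048 + 1)   == 2   (2049 bytes -> 2KB, remainder 1 < 512)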
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/MallocSiteHashOverflow.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @key stress
+ * @test
+ * @summary Test corner case that overflows a malloc site hashtable bucket
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @ignore - This test is disabled since it will stress NMT and time out during normal testing
+ * @build MallocSiteHashOverflow
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=480 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocSiteHashOverflow
+ */
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class MallocSiteHashOverflow {
+    private static long K = 1024;
+    public static void main(String args[]) throws Exception {
+        String vm_name = System.getProperty("java.vm.name");
+
+        // Create enough malloc sites hashing to the same bucket to overflow it:
+        // 257 sites on 32-bit systems, 64K + 1 sites on 64-bit systems.
+        long entries = 257;
+        if (Platform.is64bit()) {
+            entries = 64 * K + 1;
+        }
+
+        OutputAnalyzer output;
+        WhiteBox wb = WhiteBox.getWhiteBox();
+
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        wb.NMTOverflowHashBucket(entries);
+
+        // Run 'jcmd <pid> VM.native_memory statistics'
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Tracking level has been downgraded due to lack of resources");
+    }
+}
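The 257 and 64K + 1 entry counts suggest per-bucket capacities of 256 and 64K on 32- and 64-bit VMs; that reading is inferred from the test, not verified against the hashtable code. The failure mode the test provokes, as a conceptual model:

    // Conceptual model only (capacities inferred above, not HotSpot's actual table):
    // once a bucket chain is full, further inserts fail and NMT downgrades tracking.
    static boolean addSite(java.util.List<Long> bucketChain, long site, int capacity) {
        if (bucketChain.size() >= capacity) {
            return false; // rejected insert -> "Tracking level has been downgraded ..."
        }
        bucketChain.add(site);
        return true;
    }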
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/MallocStressTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @key stress
+ * @test
+ * @summary Stress test for malloc tracking
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @build MallocStressTest
+ * @ignore - This test is disabled since it will stress NMT and time out during normal testing
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocStressTest
+ */
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class MallocStressTest {
+    private static int K = 1024;
+
+    // The stress test runs in three phases:
+    // 1. alloc: Many mallocs with fewer frees, which simulates the burst memory allocation
+    //    that is usually seen during startup or class loading.
+    // 2. pause: Pause the test to check accuracy of native memory tracking
+    // 3. release: Release all malloc'd memory and check native memory tracking result.
+    public enum TestPhase {
+        alloc,
+        pause,
+        release
+    };
+
+    static TestPhase phase = TestPhase.alloc;
+
+    // malloc'd memory
+    static ArrayList<MallocMemory>  mallocd_memory = new ArrayList<MallocMemory>();
+    static long                     mallocd_total  = 0;
+    static WhiteBox                 whiteBox;
+    static AtomicInteger            pause_count = new AtomicInteger();
+
+    static boolean                  is_64_bit_system;
+
+    private static boolean is_64_bit_system() { return is_64_bit_system; }
+
+    public static void main(String args[]) throws Exception {
+        is_64_bit_system = (Platform.is64bit());
+
+        OutputAnalyzer output;
+        whiteBox = WhiteBox.getWhiteBox();
+
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        AllocThread[]   alloc_threads = new AllocThread[256];
+        ReleaseThread[] release_threads = new ReleaseThread[64];
+
+        int index;
+        // Create many allocation threads
+        for (index = 0; index < alloc_threads.length; index ++) {
+            alloc_threads[index] = new AllocThread();
+        }
+
+        // Fewer release threads
+        for (index = 0; index < release_threads.length; index ++) {
+            release_threads[index] = new ReleaseThread();
+        }
+
+        if (is_64_bit_system()) {
+            sleep_wait(2*60*1000);
+        } else {
+            sleep_wait(60*1000);
+        }
+        // pause the stress test
+        phase = TestPhase.pause;
+        while (pause_count.intValue() <  alloc_threads.length + release_threads.length) {
+            sleep_wait(10);
+        }
+
+        long mallocd_total_in_KB = (mallocd_total + K / 2) / K;
+
+        // Now check if the result from NMT matches the total memory allocated.
+        String expected_test_summary = "Test (reserved=" + mallocd_total_in_KB +"KB, committed=" + mallocd_total_in_KB + "KB)";
+        // Run 'jcmd <pid> VM.native_memory summary'
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain(expected_test_summary);
+
+        // Release all allocated memory
+        phase = TestPhase.release;
+        synchronized(mallocd_memory) {
+            mallocd_memory.notifyAll();
+        }
+
+        // Join all threads
+        for (index = 0; index < alloc_threads.length; index ++) {
+            try {
+                alloc_threads[index].join();
+            } catch (InterruptedException e) {
+            }
+        }
+
+        for (index = 0; index < release_threads.length; index ++) {
+            try {
+                release_threads[index].join();
+            } catch (InterruptedException e) {
+            }
+        }
+
+        // All test memory allocated should be released
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+
+        // Verify that tracking level has not been downgraded
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Tracking level has been downgraded due to lack of resources");
+    }
+
+    private static void sleep_wait(int n) {
+        try {
+            Thread.sleep(n);
+        } catch (InterruptedException e) {
+        }
+    }
+
+
+    static class MallocMemory {
+        private long  addr;
+        private int   size;
+
+        MallocMemory(long addr, int size) {
+            this.addr = addr;
+            this.size = size;
+        }
+
+        long addr()  { return this.addr; }
+        int  size()  { return this.size; }
+    }
+
+    static class AllocThread extends Thread {
+        AllocThread() {
+            this.setName("MallocThread");
+            this.start();
+        }
+
+        // AllocThread only runs "Alloc" phase
+        public void run() {
+            Random random = new Random();
+            while (MallocStressTest.phase == TestPhase.alloc) {
+                int r = Math.abs(random.nextInt());
+                // Only malloc a small amount to avoid OOM
+                int size = r % 32;
+                if (is_64_bit_system()) {
+                    r = r % 32 * K;
+                } else {
+                    r = r % 64;
+                }
+                if (size == 0) size = 1;
+                long addr = MallocStressTest.whiteBox.NMTMallocWithPseudoStack(size, r);
+                if (addr != 0) {
+                    MallocMemory mem = new MallocMemory(addr, size);
+                    synchronized(MallocStressTest.mallocd_memory) {
+                        MallocStressTest.mallocd_memory.add(mem);
+                        MallocStressTest.mallocd_total += size;
+                    }
+                } else {
+                    System.out.println("Out of malloc memory");
+                    break;
+                }
+            }
+            MallocStressTest.pause_count.incrementAndGet();
+        }
+    }
+
+    static class ReleaseThread extends Thread {
+        private Random random = new Random();
+        ReleaseThread() {
+            this.setName("ReleaseThread");
+            this.start();
+        }
+
+        public void run() {
+            while(true) {
+                switch(MallocStressTest.phase) {
+                case alloc:
+                    slow_release();
+                    break;
+                case pause:
+                    enter_pause();
+                    break;
+                case release:
+                    quick_release();
+                    return;
+                }
+            }
+        }
+
+        private void enter_pause() {
+            MallocStressTest.pause_count.incrementAndGet();
+            while (MallocStressTest.phase != MallocStressTest.TestPhase.release) {
+                try {
+                    synchronized(MallocStressTest.mallocd_memory) {
+                        MallocStressTest.mallocd_memory.wait(10);
+                    }
+                } catch (InterruptedException e) {
+                }
+            }
+        }
+
+        private void quick_release() {
+            List<MallocMemory> free_list;
+            while (true) {
+                synchronized(MallocStressTest.mallocd_memory) {
+                    if (MallocStressTest.mallocd_memory.isEmpty()) return;
+                    int size =  Math.min(MallocStressTest.mallocd_memory.size(), 5000);
+                    List<MallocMemory> subList = MallocStressTest.mallocd_memory.subList(0, size);
+                    free_list = new ArrayList<MallocMemory>(subList);
+                    subList.clear();
+                }
+                for (int index = 0; index < free_list.size(); index ++) {
+                    MallocMemory mem = free_list.get(index);
+                    MallocStressTest.whiteBox.NMTFree(mem.addr());
+                }
+            }
+        }
+
+        private void slow_release() {
+            try {
+                Thread.sleep(10);
+            } catch (InterruptedException e) {
+            }
+            synchronized(MallocStressTest.mallocd_memory) {
+                if (MallocStressTest.mallocd_memory.isEmpty()) return;
+                int n = Math.abs(random.nextInt()) % MallocStressTest.mallocd_memory.size();
+                MallocMemory mem = mallocd_memory.remove(n);
+                MallocStressTest.whiteBox.NMTFree(mem.addr());
+                MallocStressTest.mallocd_total -= mem.size();
+            }
+        }
+    }
+}
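Distilled from the test above: workers run until the shared phase leaves alloc, check in via pause_count, then block on mallocd_memory until the main thread flips the phase to release and calls notifyAll. A self-contained miniature of that handshake (an illustration, not the test itself; it marks phase volatile to guarantee visibility, which the original leaves to synchronization side effects):

    import java.util.concurrent.atomic.AtomicInteger;

    // Miniature of the pause/release handshake used by MallocStressTest.
    class PhaseHandshakeDemo {
        enum Phase { ALLOC, PAUSE, RELEASE }
        static volatile Phase phase = Phase.ALLOC;
        static final Object lock = new Object();
        static final AtomicInteger checkedIn = new AtomicInteger();

        public static void main(String[] args) throws InterruptedException {
            int workers = 4;
            for (int i = 0; i < workers; i++) {
                new Thread(PhaseHandshakeDemo::worker).start();
            }
            Thread.sleep(100);             // let workers "allocate" for a while
            phase = Phase.PAUSE;           // ask every worker to check in
            while (checkedIn.get() < workers) Thread.sleep(10);
            System.out.println("quiescent: safe to inspect shared state");
            phase = Phase.RELEASE;         // let workers finish
            synchronized (lock) { lock.notifyAll(); }
        }

        static void worker() {
            while (phase == Phase.ALLOC) { Thread.yield(); } // simulated alloc phase
            checkedIn.incrementAndGet();                     // check in exactly once
            synchronized (lock) {
                while (phase != Phase.RELEASE) {
                    try { lock.wait(10); } catch (InterruptedException e) { }
                }
            }
        }
    }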
--- a/hotspot/test/runtime/NMT/MallocTestType.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/MallocTestType.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,9 @@
  * @key nmt jcmd
  * @library /testlibrary /testlibrary/whitebox
  * @build MallocTestType
+ * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocTestType
  */
 
@@ -51,11 +53,6 @@
     long memAlloc1 = wb.NMTMalloc(512 * 1024);
     wb.NMTFree(memAlloc2);
 
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!wb.NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
-
     // Run 'jcmd <pid> VM.native_memory summary'
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
     output = new OutputAnalyzer(pb.start());
@@ -64,10 +61,6 @@
     // Free the memory allocated by NMTAllocTest
     wb.NMTFree(memAlloc1);
 
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!wb.NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
     output = new OutputAnalyzer(pb.start());
     output.shouldNotContain("Test (reserved=");
   }
--- a/hotspot/test/runtime/NMT/PrintNMTStatistics.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/PrintNMTStatistics.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,9 @@
  * @summary Make sure PrintNMTStatistics works on normal JVM exit
  * @library /testlibrary /testlibrary/whitebox
  * @build PrintNMTStatistics
+ * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main PrintNMTStatistics
  */
 
@@ -45,10 +47,6 @@
     // We start a new java process running with an argument and use WB API to ensure
     // we have data for NMT on VM exit
     if (args.length > 0) {
-      // Use WB API to ensure that all data has been merged before we continue
-      if (!WhiteBox.getWhiteBox().NMTWaitForDataMerge()) {
-        throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-      }
       return;
     }
 
--- a/hotspot/test/runtime/NMT/PrintNMTStatisticsWithNMTDisabled.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/PrintNMTStatisticsWithNMTDisabled.java	Thu Aug 28 14:53:43 2014 -0700
@@ -26,6 +26,7 @@
  * @key nmt
  * @summary Trying to enable PrintNMTStatistics should result in a warning
  * @library /testlibrary
+ * @ignore
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/ReleaseCommittedMemory.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/ReleaseCommittedMemory.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,9 @@
  * @key nmt regression
  * @library /testlibrary /testlibrary/whitebox
  * @build ReleaseCommittedMemory
+ * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ReleaseCommittedMemory
  */
 
@@ -44,7 +46,6 @@
     addr = wb.NMTReserveMemory(reserveSize);
     wb.NMTCommitMemory(addr, 128*1024);
     wb.NMTReleaseMemory(addr, reserveSize);
-    wb.NMTWaitForDataMerge();
   }
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/ReleaseNoCommit.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Release uncommitted memory and make sure NMT handles it correctly
+ * @key nmt regression
+ * @library /testlibrary /testlibrary/whitebox
+ * @build ReleaseNoCommit
+ * @ignore
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=summary ReleaseNoCommit
+ */
+
+import com.oracle.java.testlibrary.JDKToolFinder;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+import sun.hotspot.WhiteBox;
+
+public class ReleaseNoCommit {
+
+    public static void main(String args[]) throws Exception {
+        WhiteBox wb = WhiteBox.getWhiteBox();
+        long reserveSize = 256 * 1024;
+        long addr;
+
+        ProcessBuilder pb = new ProcessBuilder();
+        OutputAnalyzer output;
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+
+        addr = wb.NMTReserveMemory(reserveSize);
+        // Check for reserved
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain(" Test (reserved=256KB, committed=0KB)");
+
+        wb.NMTReleaseMemory(addr, reserveSize);
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+    }
+}
--- a/hotspot/test/runtime/NMT/ShutdownTwice.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/ShutdownTwice.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @key nmt jcmd
  * @summary Run shutdown twice
  * @library /testlibrary
+ * @ignore
  * @run main/othervm -XX:NativeMemoryTracking=detail ShutdownTwice
  */
 
@@ -45,12 +46,12 @@
     output = new OutputAnalyzer(pb.start());
 
     // Verify that jcmd reports that NMT is shutting down
-    output.shouldContain("Shutdown is in progress, it will take a few moments to completely shutdown");
+    output.shouldContain("Native memory tracking has been turned off");
 
     // Run shutdown again
     output = new OutputAnalyzer(pb.start());
 
     // Verify that jcmd reports that NMT has been shutdown already
-    output.shouldContain("Native memory tracking has been shutdown by user");
+    output.shouldContain("Native memory tracking has been shutdown");
   }
 }
--- a/hotspot/test/runtime/NMT/SummaryAfterShutdown.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/SummaryAfterShutdown.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @key nmt jcmd
  * @summary Verify that jcmd correctly reports that NMT is not enabled after a shutdown
  * @library /testlibrary
+ * @ignore
  * @run main/othervm -XX:NativeMemoryTracking=detail SummaryAfterShutdown
  */
 
@@ -44,13 +45,13 @@
     output = new OutputAnalyzer(pb.start());
 
     // Verify that jcmd reports that NMT is shutting down
-    output.shouldContain("Shutdown is in progress, it will take a few moments to completely shutdown");
+    output.shouldContain("Native memory tracking has been turned off");
 
     // Run 'jcmd <pid> VM.native_memory summary'
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
     output = new OutputAnalyzer(pb.start());
 
     // Verify that jcmd reports that NMT has been shutdown
-    output.shouldContain("Native memory tracking has been shutdown by user");
+    output.shouldContain("Native memory tracking has been shutdown");
   }
 }
--- a/hotspot/test/runtime/NMT/SummarySanityCheck.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/SummarySanityCheck.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,9 @@
  * @summary Sanity check the output of NMT
  * @library /testlibrary /testlibrary/whitebox
  * @build SummarySanityCheck
+ * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:NativeMemoryTracking=summary -XX:+WhiteBoxAPI SummarySanityCheck
  */
 
@@ -44,11 +46,6 @@
     // Grab my own PID
     String pid = Integer.toString(ProcessTools.getProcessId());
 
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!WhiteBox.getWhiteBox().NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
-
     ProcessBuilder pb = new ProcessBuilder();
 
     // Run  'jcmd <pid> VM.native_memory summary scale=KB'
@@ -69,13 +66,13 @@
     // Match '- <mtType> (reserved=<reserved>KB, committed=<committed>KB)
     Pattern mtTypePattern = Pattern.compile("-\\s+(?<typename>[\\w\\s]+)\\(reserved=(?<reserved>\\d+)KB,\\scommitted=(?<committed>\\d+)KB\\)");
     // Match 'Total: reserved=<reserved>KB, committed=<committed>KB'
-    Pattern totalMemoryPattern = Pattern.compile("Total\\:\\s\\sreserved=(?<reserved>\\d+)KB,\\s\\scommitted=(?<committed>\\d+)KB");
+    Pattern totalMemoryPattern = Pattern.compile("Total\\:\\sreserved=(?<reserved>\\d+)KB,\\scommitted=(?<committed>\\d+)KB");
 
     for (int i = 0; i < lines.length; i++) {
       if (lines[i].startsWith("Total")) {
         Matcher totalMemoryMatcher = totalMemoryPattern.matcher(lines[i]);
 
-        if (totalMemoryMatcher.matches() && totalMemoryMatcher.groupCount() == 2) {
+        if (totalMemoryMatcher.matches()) {
           totalCommitted = Integer.parseInt(totalMemoryMatcher.group("committed"));
           totalReserved = Integer.parseInt(totalMemoryMatcher.group("reserved"));
         } else {
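
Note: the tightened pattern above also drops the redundant groupCount() check — Matcher.groupCount() returns the number of capturing groups in the matcher's pattern (always 2 here) regardless of whether the match succeeded, so the extra condition could never fail. A minimal, self-contained sketch of the new single-space total-line pattern, using an assumed sample line rather than real NMT output:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TotalLinePatternSketch {
    public static void main(String[] args) {
        // New single-space form, as in the updated SummarySanityCheck above.
        Pattern totalMemoryPattern = Pattern.compile(
            "Total\\:\\sreserved=(?<reserved>\\d+)KB,\\scommitted=(?<committed>\\d+)KB");
        // Sample line is assumed for illustration, not taken from an actual NMT run.
        Matcher m = totalMemoryPattern.matcher("Total: reserved=1024KB, committed=512KB");
        if (m.matches()) {
            System.out.println("reserved=" + m.group("reserved")
                + "KB committed=" + m.group("committed") + "KB");
        }
    }
}
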
--- a/hotspot/test/runtime/NMT/ThreadedMallocTestType.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/ThreadedMallocTestType.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,9 @@
  * @key nmt jcmd
  * @library /testlibrary /testlibrary/whitebox
  * @build ThreadedMallocTestType
+ * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ThreadedMallocTestType
  */
 
@@ -58,11 +60,6 @@
     allocThread.start();
     allocThread.join();
 
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!wb.NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
-
     // Run 'jcmd <pid> VM.native_memory summary'
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
     output = new OutputAnalyzer(pb.start());
@@ -80,11 +77,6 @@
     freeThread.start();
     freeThread.join();
 
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!wb.NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
-
     output = new OutputAnalyzer(pb.start());
     output.shouldNotContain("Test (reserved=");
   }
--- a/hotspot/test/runtime/NMT/ThreadedVirtualAllocTestType.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/ThreadedVirtualAllocTestType.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,9 @@
  * @key nmt jcmd
  * @library /testlibrary /testlibrary/whitebox
  * @build ThreadedVirtualAllocTestType
+ * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ThreadedVirtualAllocTestType
  */
 
@@ -60,8 +62,6 @@
     reserveThread.start();
     reserveThread.join();
 
-    mergeData();
-
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"});
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=512KB, committed=0KB)");
@@ -77,8 +77,6 @@
     commitThread.start();
     commitThread.join();
 
-    mergeData();
-
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=512KB, committed=128KB)");
     if (has_nmt_detail) {
@@ -93,8 +91,6 @@
     uncommitThread.start();
     uncommitThread.join();
 
-    mergeData();
-
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=512KB, committed=0KB)");
     output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed");
@@ -107,17 +103,9 @@
     releaseThread.start();
     releaseThread.join();
 
-    mergeData();
-
     output = new OutputAnalyzer(pb.start());
     output.shouldNotContain("Test (reserved=");
     output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved");
   }
 
-  public static void mergeData() throws Exception {
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!wb.NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
-  }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/VirtualAllocCommitUncommitRecommit.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test reserve/commit/uncommit/release of virtual memory and that we track it correctly
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @ignore
+ * @build VirtualAllocCommitUncommitRecommit
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail VirtualAllocCommitUncommitRecommit
+ *
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class VirtualAllocCommitUncommitRecommit {
+
+    public static WhiteBox wb = WhiteBox.getWhiteBox();
+
+    public static void main(String args[]) throws Exception {
+        OutputAnalyzer output;
+        long commitSize = 4 * 1024; // 4KB
+        long reserveSize = 1024 * 1024; // 1024KB
+        long addr;
+
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        boolean has_nmt_detail = wb.NMTIsDetailSupported();
+        if (has_nmt_detail) {
+            System.out.println("NMT detail support detected.");
+        } else {
+            System.out.println("NMT detail support not detected.");
+        }
+
+        // reserve
+        addr = wb.NMTReserveMemory(reserveSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid,
+                "VM.native_memory", "detail" });
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=0KB)");
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+
+        long addrA = addr;
+        long addrB = addr + commitSize;
+        long addrC = addr + (2 * commitSize);
+        long addrD = addr + (3 * commitSize);
+        long addrE = addr + (4 * commitSize);
+        long addrF = addr + (5 * commitSize);
+
+        // commit ABCD
+        wb.NMTCommitMemory(addrA, commitSize);
+        wb.NMTCommitMemory(addrB, commitSize);
+        wb.NMTCommitMemory(addrC, commitSize);
+        wb.NMTCommitMemory(addrD, commitSize);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=16KB)");
+
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+        // uncommit BC
+        wb.NMTUncommitMemory(addrB, commitSize);
+        wb.NMTUncommitMemory(addrC, commitSize);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=8KB)");
+
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+
+        // commit EF
+        wb.NMTCommitMemory(addrE, commitSize);
+        wb.NMTCommitMemory(addrF, commitSize);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=16KB)");
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+
+        // uncommit A
+        wb.NMTUncommitMemory(addrA, commitSize);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=12KB)");
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+
+        // commit ABC
+        wb.NMTCommitMemory(addrA, commitSize);
+        wb.NMTCommitMemory(addrB, commitSize);
+        wb.NMTCommitMemory(addrC, commitSize);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=24KB)");
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+
+        // uncommit ABCDEF
+        wb.NMTUncommitMemory(addrA, commitSize);
+        wb.NMTUncommitMemory(addrB, commitSize);
+        wb.NMTUncommitMemory(addrC, commitSize);
+        wb.NMTUncommitMemory(addrD, commitSize);
+        wb.NMTUncommitMemory(addrE, commitSize);
+        wb.NMTUncommitMemory(addrF, commitSize);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=0KB)");
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+
+        // release
+        wb.NMTReleaseMemory(addr, reserveSize);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+        output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                + Long.toHexString(addr + reserveSize) + "\\] reserved");
+    }
+}
--- a/hotspot/test/runtime/NMT/VirtualAllocTestType.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/NMT/VirtualAllocTestType.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,10 @@
  * @summary Test Reserve/Commit/Uncommit/Release of virtual memory and that we track it correctly
  * @key nmt jcmd
  * @library /testlibrary /testlibrary/whitebox
+ * @ignore
  * @build VirtualAllocTestType
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail VirtualAllocTestType
  */
 
@@ -54,7 +56,6 @@
     }
 
     addr = wb.NMTReserveMemory(reserveSize);
-    mergeData();
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"});
 
     output = new OutputAnalyzer(pb.start());
@@ -65,7 +66,6 @@
 
     wb.NMTCommitMemory(addr, commitSize);
 
-    mergeData();
 
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=256KB, committed=128KB)");
@@ -75,24 +75,15 @@
 
     wb.NMTUncommitMemory(addr, commitSize);
 
-    mergeData();
 
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=256KB, committed=0KB)");
     output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed");
 
     wb.NMTReleaseMemory(addr, reserveSize);
-    mergeData();
 
     output = new OutputAnalyzer(pb.start());
     output.shouldNotContain("Test (reserved=");
     output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved");
   }
-
-  public static void mergeData() throws Exception {
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!wb.NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
-  }
 }
--- a/hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java	Thu Aug 28 14:53:43 2014 -0700
@@ -21,8 +21,7 @@
  * questions.
  */
 
-/* @ignore JDK-8043896
- * @test LimitSharedSizes
+/* @test LimitSharedSizes
  * @summary Test handling of limits on shared space size
  * @library /testlibrary
  * @run main LimitSharedSizes
--- a/hotspot/test/runtime/interned/SanityTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/interned/SanityTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build SanityTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SanityTest
  */
 
--- a/hotspot/test/runtime/jsig/Test8017498.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/jsig/Test8017498.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -24,7 +24,6 @@
 #
 
 ##
-## @ignore 8028806
 ## @test Test8017498.sh
 ## @bug 8017498
 ## @bug 8020791
@@ -75,7 +74,7 @@
 
 $gcc_cmd -DLINUX -fPIC -shared \
     ${EXTRA_CFLAG} -z noexecstack \
-    -o ${TESTSRC}${FS}libTestJNI.so \
+    -o libTestJNI.so \
     -I${COMPILEJAVA}${FS}include \
     -I${COMPILEJAVA}${FS}include${FS}linux \
     ${TESTSRC}${FS}TestJNI.c
@@ -83,7 +82,7 @@
 # run the java test in the background
 cmd="LD_PRELOAD=$MY_LD_PRELOAD \
     ${TESTJAVA}${FS}bin${FS}java \
-    -Djava.library.path=${TESTSRC}${FS} -server TestJNI 100"
+    -Djava.library.path=. -server TestJNI 100"
 echo "$cmd > test.out 2>&1"
 eval $cmd > test.out 2>&1
 
--- a/hotspot/test/runtime/memory/ReadFromNoaccessArea.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/memory/ReadFromNoaccessArea.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build ReadFromNoaccessArea
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main ReadFromNoaccessArea
  */
 
--- a/hotspot/test/runtime/memory/ReserveMemory.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/memory/ReserveMemory.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build ReserveMemory
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main ReserveMemory
  */
 
--- a/hotspot/test/runtime/memory/RunUnitTestsConcurrently.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/memory/RunUnitTestsConcurrently.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build RunUnitTestsConcurrently
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI RunUnitTestsConcurrently 30 15000
  */
 
--- a/hotspot/test/runtime/memory/StressVirtualSpaceResize.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/memory/StressVirtualSpaceResize.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build StressVirtualSpaceResize
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI StressVirtualSpaceResize
  */
 
--- a/hotspot/test/runtime/whitebox/WBStackSize.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/runtime/whitebox/WBStackSize.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build WBStackSize
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xss512k WBStackSize
  */
 
--- a/hotspot/test/sanity/WBApi.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/sanity/WBApi.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build WBApi
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI WBApi
  */
 
--- a/hotspot/test/sanity/WhiteBox.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/sanity/WhiteBox.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
  * @library /testlibrary
  * @compile WhiteBox.java
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI sun.hotspot.WhiteBox
  * @clean sun.hotspot.WhiteBox
  */
--- a/hotspot/test/serviceability/ParserTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/serviceability/ParserTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.parser.*
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ParserTest
  */
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/serviceability/attach/AttachSetGetFlag.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8054823
+ * @summary Tests the setFlag and printFlag attach command
+ * @library /testlibrary
+ * @build com.oracle.java.testlibrary.* AttachSetGetFlag
+ * @run driver AttachSetGetFlag
+ */
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.io.InputStream;
+import java.lang.reflect.Field;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
+import sun.tools.attach.HotSpotVirtualMachine;
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.ProcessTools;
+import com.sun.tools.attach.VirtualMachine;
+
+public class AttachSetGetFlag {
+
+  public static void main(String... args) throws Exception {
+    // Test a manageable uintx flag.
+    testGetFlag("MaxHeapFreeRatio", "60");
+    testSetFlag("MaxHeapFreeRatio", "50", "60");
+
+    // Test a non-manageable size_t flag.
+    // Since it is not manageable, we can't test the setFlag functionality.
+    testGetFlag("ArrayAllocatorMallocLimit", "128");
+    // testSetFlag("ArrayAllocatorMallocLimit", "64", "128");
+  }
+
+  public static ProcessBuilder runTarget(String flagName, String flagValue) throws Exception {
+    return ProcessTools.createJavaProcessBuilder(
+        "-XX:+UnlockExperimentalVMOptions",
+        "-XX:" + flagName + "=" + flagValue,
+        "AttachSetGetFlag$Target");
+  }
+
+  public static void testGetFlag(String flagName, String flagValue) throws Exception {
+    ProcessBuilder pb = runTarget(flagName, flagValue);
+
+    Process target = pb.start();
+
+    try {
+      waitForReady(target);
+
+      int pid = (int)target.getPid();
+
+      HotSpotVirtualMachine vm = (HotSpotVirtualMachine)VirtualMachine.attach(((Integer)pid).toString());
+
+      // Test Get
+      BufferedReader remoteDataReader = new BufferedReader(new InputStreamReader(
+          vm.printFlag(flagName)));
+
+      boolean foundExpectedLine = false;
+
+      String line = null;
+      while((line = remoteDataReader.readLine()) != null) {
+        System.out.println("printFlag: " + line);
+        if (line.equals("-XX:" + flagName + "=" + flagValue)) {
+          foundExpectedLine = true;
+        }
+      }
+
+      Asserts.assertTrue(foundExpectedLine, "Didn't get the expected output: '-XX:" + flagName + "=" + flagValue + "'");
+
+      vm.detach();
+    }
+    finally {
+      target.destroy();
+      target.waitFor();
+    }
+  }
+
+  public static void testSetFlag(String flagName, String initialFlagValue, String flagValue) throws Exception {
+    ProcessBuilder pb = runTarget(flagName, initialFlagValue);
+
+    Process target = pb.start();
+
+    try {
+      waitForReady(target);
+
+      int pid = (int)target.getPid();
+
+      HotSpotVirtualMachine vm = (HotSpotVirtualMachine)VirtualMachine.attach(((Integer)pid).toString());
+
+      // First set the value.
+      BufferedReader remoteDataReader = new BufferedReader(new InputStreamReader(
+          vm.setFlag(flagName, flagValue)));
+
+      String line;
+      while((line = remoteDataReader.readLine()) != null) {
+        System.out.println("setFlag: " + line);
+        // Just empty the stream.
+      }
+      remoteDataReader.close();
+
+      // Then read and make sure we get back the set value.
+      remoteDataReader = new BufferedReader(new InputStreamReader(vm.printFlag(flagName)));
+
+      boolean foundExpectedLine = false;
+      line = null;
+      while((line = remoteDataReader.readLine()) != null) {
+        System.out.println("getFlag: " + line);
+        if (line.equals("-XX:" + flagName + "=" + flagValue)) {
+          foundExpectedLine = true;
+        }
+      }
+
+      Asserts.assertTrue(foundExpectedLine, "Didn't get the expected output: '-XX:" + flagName + "=" + flagValue + "'");
+
+      vm.detach();
+
+    } finally {
+      target.destroy();
+      target.waitFor();
+    }
+  }
+
+  private static void waitForReady(Process target) throws Exception {
+    InputStream os = target.getInputStream();
+    try (BufferedReader reader = new BufferedReader(new InputStreamReader(os))) {
+      String line;
+      while ((line = reader.readLine()) != null) {
+        if ("Ready".equals(line)) {
+          return;
+        }
+      }
+    }
+  }
+
+
+  public static class Target {
+    public static void main(String [] args) throws Exception {
+      System.out.println("Ready");
+      System.out.flush();
+      while (true) {
+        Thread.sleep(1000);
+      }
+    }
+  }
+}
--- a/hotspot/test/test_env.sh	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/test_env.sh	Thu Aug 28 14:53:43 2014 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 #
-#  Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+#  Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 #  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 #  This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,9 @@
 fi
 echo "TESTCLASSES=${TESTCLASSES}"
 
+TESTOPTS="${TESTVMOPTS} ${TESTJAVAOPTS}"
+echo "TESTOPTS=${TESTOPTS}"
+
 # set platform-dependent variables
 OS=`uname -s`
 case "$OS" in
@@ -101,14 +104,14 @@
 echo "THIS_DIR=${THIS_DIR}"
 
 # Check to ensure the java defined actually works
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -version
+${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -version
 if [ $? != 0 ]; then
-  echo "Wrong TESTJAVA or TESTVMOPTS:"
-  echo $TESTJAVA TESTVMOPTS
+  echo "Wrong TESTJAVA or TESTJAVAOPTS or TESTVMOPTS:"
+  echo ''$TESTJAVA'' ''$TESTJAVAOPTS'' ''$TESTVMOPTS''
   exit 1
 fi
 
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -Xinternalversion > vm_version.out 2>&1
+${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -Xinternalversion > vm_version.out 2>&1
 
 VM_TYPE="unknown"
 grep "Server" vm_version.out > ${NULL}
--- a/hotspot/test/testlibrary/ctw/test/ClassesDirTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/testlibrary/ctw/test/ClassesDirTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
  * @build ClassFileInstaller sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox Foo Bar
  * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main ClassesDirTest prepare
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld classes
  * @run main ClassesDirTest check ctw.log
--- a/hotspot/test/testlibrary/ctw/test/ClassesListTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/testlibrary/ctw/test/ClassesListTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
  * @build ClassFileInstaller sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox Foo Bar
  * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main ClassesListTest prepare
  * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld classes.lst
  * @run main ClassesListTest check ctw.log
--- a/hotspot/test/testlibrary/ctw/test/JarDirTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/testlibrary/ctw/test/JarDirTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
  * @build ClassFileInstaller com.oracle.java.testlibrary.* sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox Foo Bar
  * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main JarDirTest prepare
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld jars/*
  * @run main JarDirTest check ctw.log
--- a/hotspot/test/testlibrary/ctw/test/JarsTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/testlibrary/ctw/test/JarsTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
  * @build ClassFileInstaller com.oracle.java.testlibrary.* sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox Foo Bar
  * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main JarsTest prepare
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld foo.jar bar.jar
  * @run main JarsTest check ctw.log
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Thu Aug 28 14:53:43 2014 -0700
@@ -95,7 +95,8 @@
   public native void NMTCommitMemory(long addr, long size);
   public native void NMTUncommitMemory(long addr, long size);
   public native void NMTReleaseMemory(long addr, long size);
-  public native boolean NMTWaitForDataMerge();
+  public native void NMTOverflowHashBucket(long num);
+  public native long NMTMallocWithPseudoStack(long size, int index);
   public native boolean NMTIsDetailSupported();
 
   // Compiler
@@ -167,17 +168,20 @@
   public native void    setIntxVMFlag(String name, long value);
   public native void    setUintxVMFlag(String name, long value);
   public native void    setUint64VMFlag(String name, long value);
+  public native void    setSizeTVMFlag(String name, long value);
   public native void    setStringVMFlag(String name, String value);
   public native void    setDoubleVMFlag(String name, double value);
   public native Boolean getBooleanVMFlag(String name);
   public native Long    getIntxVMFlag(String name);
   public native Long    getUintxVMFlag(String name);
   public native Long    getUint64VMFlag(String name);
+  public native Long    getSizeTVMFlag(String name);
   public native String  getStringVMFlag(String name);
   public native Double  getDoubleVMFlag(String name);
   private final List<Function<String,Object>> flagsGetters = Arrays.asList(
     this::getBooleanVMFlag, this::getIntxVMFlag, this::getUintxVMFlag,
-    this::getUint64VMFlag, this::getStringVMFlag, this::getDoubleVMFlag);
+    this::getUint64VMFlag, this::getSizeTVMFlag, this::getStringVMFlag,
+    this::getDoubleVMFlag);
 
   public Object getVMFlag(String name) {
     return flagsGetters.stream()
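
Note: the WhiteBox changes above drop NMTWaitForDataMerge() — the wait-for-merge step the NMT tests in this changeset no longer call — and add size_t flag accessors alongside NMTOverflowHashBucket and NMTMallocWithPseudoStack. A minimal usage sketch of the new size_t accessors; the class name SizeTFlagSketch is hypothetical, and it assumes the whitebox classes are on the boot class path and the VM runs with -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions:

import sun.hotspot.WhiteBox;

public class SizeTFlagSketch {
    public static void main(String[] args) {
        WhiteBox wb = WhiteBox.getWhiteBox();
        // ArrayAllocatorMallocLimit is an experimental size_t flag, hence
        // the -XX:+UnlockExperimentalVMOptions requirement noted above.
        wb.setSizeTVMFlag("ArrayAllocatorMallocLimit", 128L);
        Long value = wb.getSizeTVMFlag("ArrayAllocatorMallocLimit");
        System.out.println("ArrayAllocatorMallocLimit = " + value); // expect 128
    }
}
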
--- a/hotspot/test/testlibrary_tests/whitebox/vm_flags/BooleanTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/BooleanTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build BooleanTest ClassFileInstaller sun.hotspot.WhiteBox com.oracle.java.testlibrary.*
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI BooleanTest
  * @summary testing of WB::set/getBooleanVMFlag()
  * @author igor.ignatyev@oracle.com
--- a/hotspot/test/testlibrary_tests/whitebox/vm_flags/DoubleTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/DoubleTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build DoubleTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DoubleTest
  * @summary testing of WB::set/getDoubleVMFlag()
  * @author igor.ignatyev@oracle.com
--- a/hotspot/test/testlibrary_tests/whitebox/vm_flags/IntxTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/IntxTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build IntxTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI IntxTest
  * @summary testing of WB::set/getIntxVMFlag()
  * @author igor.ignatyev@oracle.com
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/SizeTTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test SizeTTest
+ * @bug 8054823
+ * @library /testlibrary /testlibrary/whitebox
+ * @build SizeTTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions SizeTTest
+ * @summary testing of WB::set/getSizeTVMFlag()
+ */
+import com.oracle.java.testlibrary.Platform;
+
+public class SizeTTest {
+    private static final String FLAG_NAME = "ArrayAllocatorMallocLimit";
+    private static final Long[] TESTS = {0L, 100L, (long) Integer.MAX_VALUE,
+        (1L << 32L) - 1L, 1L << 32L};
+    private static final Long[] EXPECTED_64 = TESTS;
+    private static final Long[] EXPECTED_32 = {0L, 100L,
+        (long) Integer.MAX_VALUE, (1L << 32L) - 1L, 0L};
+
+    public static void main(String[] args) throws Exception {
+        VmFlagTest.runTest(FLAG_NAME, TESTS,
+            Platform.is64bit() ? EXPECTED_64 : EXPECTED_32,
+            VmFlagTest.WHITE_BOX::setSizeTVMFlag,
+            VmFlagTest.WHITE_BOX::getSizeTVMFlag);
+    }
+}
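
Note: the EXPECTED_32 values above assume that a size_t flag on a 32-bit VM keeps only the low 32 bits, so 1L << 32 reads back as 0 while (1L << 32) - 1 survives intact. An illustrative snippet of that truncation, pure Java arithmetic with no WhiteBox dependency:

public class SizeTTruncationSketch {
    public static void main(String[] args) {
        long value = 1L << 32;               // 0x1_0000_0000, needs 33 bits
        long low32 = value & 0xFFFFFFFFL;    // what a 32-bit size_t can hold
        System.out.println(low32);           // prints 0, as in EXPECTED_32
        System.out.println((1L << 32) - 1L); // 4294967295 still fits
    }
}
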
--- a/hotspot/test/testlibrary_tests/whitebox/vm_flags/StringTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/StringTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build StringTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI StringTest
  * @summary testing of WB::set/getStringVMFlag()
  * @author igor.ignatyev@oracle.com
--- a/hotspot/test/testlibrary_tests/whitebox/vm_flags/Uint64Test.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/Uint64Test.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build Uint64Test
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI Uint64Test
  * @summary testing of WB::set/getUint64VMFlag()
  * @author igor.ignatyev@oracle.com
--- a/hotspot/test/testlibrary_tests/whitebox/vm_flags/UintxTest.java	Thu Aug 21 14:16:15 2014 -0700
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/UintxTest.java	Thu Aug 28 14:53:43 2014 -0700
@@ -27,6 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build UintxTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI UintxTest
  * @summary testing of WB::set/getUintxVMFlag()
  * @author igor.ignatyev@oracle.com