Merge
author amurillo
Fri, 30 Aug 2013 00:19:42 -0700
changeset 19561 137826043047
parent 19529 282f99405018 (current diff)
parent 19560 d44eb8cd7ecc (diff)
child 19562 a9ef5fb72167
Merge
hotspot/test/runtime/7051189/Xchecksig.sh
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Fri Aug 30 00:19:42 2013 -0700
@@ -35,8 +35,9 @@
 sapkg.code = sapkg.hotspot.code;
 sapkg.compiler = sapkg.hotspot.compiler;
 
-// 'debugger' is a JavaScript keyword :-(
-// sapkg.debugger = sapkg.hotspot.debugger;
+// 'debugger' is a JavaScript keyword, but ES5 relaxes the
+// restriction on using keywords as property names
+sapkg.debugger = sapkg.hotspot.debugger;
 
 sapkg.interpreter = sapkg.hotspot.interpreter;
 sapkg.jdi = sapkg.hotspot.jdi;
@@ -116,27 +117,36 @@
       return args;
     }
 
+    // Handle __has__ specially to avoid metacircularity problems
+    // when called from __get__.
+    // Calling
+    //   this.__has__(name)
+    // will in turn call
+    //   this.__call__('__has__', name)
+    // which is not handled below
+    function __has__(name) {
+      if (typeof(name) == 'number') {
+        return so["has(int)"](name);
+      } else {
+        if (name == '__wrapped__') {
+          return true;
+        } else if (so["has(java.lang.String)"](name)) {
+          return true;
+        } else if (name.equals('toString')) {
+          return true;
+        } else {
+          return false;
+        }
+      }
+    }
+
     if (so instanceof sapkg.utilities.soql.ScriptObject) {
       return new JSAdapter() {
-        __getIds__: function() {                  
-          return so.getIds();         
+        __getIds__: function() {
+          return so.getIds();
         },
   
-        __has__ : function(name) {
-          if (typeof(name) == 'number') {
-            return so["has(int)"](name);
-          } else {
-            if (name == '__wrapped__') {
-              return true;
-            } else if (so["has(java.lang.String)"](name)) {
-              return true;
-            } else if (name.equals('toString')) {
-              return true;
-            } else {
-              return false;
-            }
-          }
-        },
+        __has__ : __has__,
   
         __delete__ : function(name) {
           if (typeof(name) == 'number') {
@@ -147,7 +157,8 @@
         },
   
         __get__ : function(name) {
-          if (! this.__has__(name)) {
+	      // don't call this.__has__(name); see comments above function __has__
+          if (! __has__.call(this, name)) {
             return undefined;
           }
           if (typeof(name) == 'number') {
@@ -162,7 +173,7 @@
                   var args = prepareArgsArray(arguments);
                   var r;
                   try {
-                    r = value.call(args);
+                    r = value.call(Java.to(args, 'java.lang.Object[]'));
                   } catch (e) {
                     println("call to " + name + " failed!");
                     throw e;
@@ -204,6 +215,18 @@
   }
 
   // define "writeln" and "write" if not defined
+  if (typeof(println) == 'undefined') {
+    println = function (str) {
+      java.lang.System.out.println(String(str));
+    }
+  }
+
+  if (typeof(print) == 'undefined') {
+    print = function (str) {
+      java.lang.System.out.print(String(str));
+    }
+  }
+
   if (typeof(writeln) == 'undefined') {
     writeln = println;
   }
@@ -235,7 +258,7 @@
 
     this.jclasses = function() {
       forEachKlass(function (clazz) {
-        writeln(clazz.getName().asString() + " @" + clazz.getHandle().toString()); 
+        writeln(clazz.getName().asString() + " @" + clazz.getAddress().toString()); 
       });
     }
     registerCommand("classes", "classes", "jclasses");
@@ -490,14 +513,14 @@
 function forEachKlass(callback) {
    var VisitorClass = sapkg.memory.SystemDictionary.ClassVisitor;
    var visitor = new VisitorClass() { visit: callback };
-   sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary$ClassVisitor)"](visitor);
+   sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary.ClassVisitor)"](visitor);
 }
 
 // iterate system dictionary for each 'Klass' and initiating loader
 function forEachKlassAndLoader(callback) {
    var VisitorClass = sapkg.memory.SystemDictionary.ClassAndLoaderVisitor;
    var visitor = new VisitorClass() { visit: callback };
-   sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary$ClassAndLoaderVisitor)"](visitor);
+   sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary.ClassAndLoaderVisitor)"](visitor);
 }
 
 // iterate system dictionary for each primitive array klass
@@ -522,7 +545,12 @@
 
 // iterates Java heap for each Oop
 function forEachOop(callback) {
-   sa.objHeap.iterate(new sapkg.oops.HeapVisitor() { doObj: callback });
+   function empty() { }
+   sa.objHeap.iterate(new sapkg.oops.HeapVisitor() {
+       prologue: empty,
+       doObj: callback,
+       epilogue: empty
+   });
 }
 
 // iterates Java heap for each Oop of given 'klass'.
@@ -536,8 +564,14 @@
    if (includeSubtypes == undefined) {
       includeSubtypes = true;
    }
+
+   function empty() { }
    sa.objHeap.iterateObjectsOfKlass(
-        new sapkg.oops.HeapVisitor() { doObj: callback },
+        new sapkg.oops.HeapVisitor() {
+            prologue: empty,
+            doObj: callback,
+            epilogue: empty
+        },
         klass, includeSubtypes);
 }
 
@@ -746,9 +780,9 @@
          // ignore;
          continue;
    } else {
-      // some type names have ':'. replace to make it as a 
+      // some type names have ':', '<', '>', '*', ' '. replace them to form a valid
       // JavaScript identifier
-      tmp.name = tmp.name.replace(':', '_').replace('<', '_').replace('>', '_').replace('*', '_').replace(' ', '_');
+      tmp.name = ("" + tmp.name).replace(/[:<>* ]/g, '_');
       eval("function read" + tmp.name + "(addr) {" +
            "   return readVMType('" + tmp.name + "', addr);}"); 
       eval("function print" + tmp.name + "(addr) {" + 
--- a/hotspot/make/hotspot_version	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/make/hotspot_version	Fri Aug 30 00:19:42 2013 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=47
+HS_BUILD_NUMBER=48
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/make/linux/makefiles/amd64.make	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/make/linux/makefiles/amd64.make	Fri Aug 30 00:19:42 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -31,9 +31,4 @@
 
 CFLAGS += -D_LP64=1
 
-# The serviceability agent relies on frame pointer (%rbp) to walk thread stack
-ifndef USE_SUNCC
-  CFLAGS += -fno-omit-frame-pointer
-endif
-
 OPT_CFLAGS/compactingPermGenGen.o = -O1
--- a/hotspot/make/linux/makefiles/gcc.make	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/make/linux/makefiles/gcc.make	Fri Aug 30 00:19:42 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -398,3 +398,10 @@
 ifdef MINIMIZE_RAM_USAGE
 CFLAGS += -DMINIMIZE_RAM_USAGE
 endif
+
+# Stack walking in the JVM relies on the frame pointer (%rbp) to walk thread stacks.
+# Explicitly specify -fno-omit-frame-pointer because the frame pointer is omitted
+# by default starting with gcc 4.6.
+ifndef USE_SUNCC
+  CFLAGS += -fno-omit-frame-pointer
+endif
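
For context (not part of the changeset): the comment added to gcc.make above is what keeps frame-pointer-based stack walking working. A minimal sketch, assuming x86-64 and GCC/Clang with -fno-omit-frame-pointer in effect, of how a walker such as the Serviceability Agent follows the %rbp chain:

    // Hypothetical illustration only; compile with g++ -fno-omit-frame-pointer.
    #include <cstdio>
    #include <cstdint>

    // With frame pointers enabled, every frame starts with the saved caller %rbp
    // followed by the return address pushed by 'call'.
    struct Frame {
      Frame*    caller;  // saved %rbp of the calling frame
      uintptr_t ret_pc;  // return address into the caller
    };

    __attribute__((noinline)) void walk_own_stack(int max_frames) {
      Frame* fp = (Frame*) __builtin_frame_address(0);
      for (int i = 0; fp != nullptr && i < max_frames; i++) {
        std::printf("frame %d: fp=%p pc=%#lx\n", i, (void*) fp, (unsigned long) fp->ret_pc);
        fp = fp->caller;  // becomes unreliable past the outermost frame, hence max_frames
      }
    }

When the frame pointer is omitted (the default starting with gcc 4.6, per the comment above), the caller link is not maintained and a walk like this stops working, which is why the flag is now set for the whole Linux build instead of only in amd64.make.
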
--- a/hotspot/make/windows/build_vm_def.sh	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/make/windows/build_vm_def.sh	Fri Aug 30 00:19:42 2013 -0700
@@ -42,8 +42,6 @@
  MKS_HOME=`dirname "$SH"`
 fi
 
-echo "EXPORTS" > vm1.def
-
 AWK="$MKS_HOME/awk.exe"
 if [ ! -e $AWK ]; then
     AWK="$MKS_HOME/gawk.exe"
@@ -55,6 +53,22 @@
 RM="$MKS_HOME/rm.exe"
 DUMPBIN="link.exe /dump"
 
+if [ "$1" = "-nosa" ]; then
+    echo EXPORTS > vm.def
+    echo ""
+    echo "***"
+    echo "*** Not building SA: BUILD_WIN_SA != 1"
+    echo "*** C++ Vtables NOT included in vm.def"
+    echo "*** This jvm.dll will NOT work properly with SA."
+    echo "***"
+    echo "*** When in doubt, set BUILD_WIN_SA=1, clean and rebuild."
+    echo "***"
+    echo ""
+    exit
+fi
+
+echo "EXPORTS" > vm1.def
+
 # When called from IDE the first param should contain the link version, otherwise may be nill
 if [ "x$1" != "x" ]; then
 LD_VER="$1"
--- a/hotspot/make/windows/makefiles/debug.make	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/make/windows/makefiles/debug.make	Fri Aug 30 00:19:42 2013 -0700
@@ -49,9 +49,6 @@
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE
 
-vm.def: $(Obj_Files)
-	sh $(WorkSpace)/make/windows/build_vm_def.sh
-
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LD) @<<
   $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
--- a/hotspot/make/windows/makefiles/fastdebug.make	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/make/windows/makefiles/fastdebug.make	Fri Aug 30 00:19:42 2013 -0700
@@ -48,9 +48,6 @@
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE
 
-vm.def: $(Obj_Files)
-	sh $(WorkSpace)/make/windows/build_vm_def.sh
-
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LD) @<<
   $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
--- a/hotspot/make/windows/makefiles/product.make	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/make/windows/makefiles/product.make	Fri Aug 30 00:19:42 2013 -0700
@@ -51,9 +51,6 @@
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE
 
-vm.def: $(Obj_Files)
-	sh $(WorkSpace)/make/windows/build_vm_def.sh
-
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LD) @<<
   $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
--- a/hotspot/make/windows/makefiles/projectcreator.make	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/make/windows/makefiles/projectcreator.make	Fri Aug 30 00:19:42 2013 -0700
@@ -92,6 +92,10 @@
         -disablePch        getThread_windows_$(Platform_arch).cpp \
         -disablePch_compiler2     opcodes.cpp
 
+!if "$(BUILD_WIN_SA)" != "1"
+BUILD_VM_DEF_FLAG=-nosa
+!endif
+
 # Common options for the IDE builds for c1, and c2
 ProjectCreatorIDEOptions=\
         $(ProjectCreatorIDEOptions) \
@@ -104,7 +108,7 @@
         -jdkTargetRoot $(HOTSPOTJDKDIST) \
         -define ALIGN_STACK_FRAMES \
         -define VM_LITTLE_ENDIAN \
-        -prelink  "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	set JAVA_HOME=$(HOTSPOTJDKDIST)	$(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LD_VER)" \
+        -prelink  "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	set JAVA_HOME=$(HOTSPOTJDKDIST)	$(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(BUILD_VM_DEF_FLAG) $(LD_VER)" \
         -ignoreFile jsig.c \
         -ignoreFile jvmtiEnvRecommended.cpp \
         -ignoreFile jvmtiEnvStub.cpp \
--- a/hotspot/make/windows/makefiles/vm.make	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/make/windows/makefiles/vm.make	Fri Aug 30 00:19:42 2013 -0700
@@ -393,3 +393,11 @@
 _build_pch_file.obj:
         @echo #include "precompiled.hpp" > ../generated/_build_pch_file.cpp
         $(CXX) $(CXX_FLAGS) /Fp"vm.pch" /Yc"precompiled.hpp" /c ../generated/_build_pch_file.cpp
+
+!if "$(BUILD_WIN_SA)" != "1"
+BUILD_VM_DEF_FLAG=-nosa
+!endif
+
+vm.def: $(Obj_Files)
+	sh $(WorkSpace)/make/windows/build_vm_def.sh $(BUILD_VM_DEF_FLAG)
+
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -642,13 +642,14 @@
 #endif
 
 #ifdef __APPLE__
-static uint64_t locate_unique_thread_id() {
+static uint64_t locate_unique_thread_id(mach_port_t mach_thread_port) {
   // Additional thread_id used to correlate threads in SA
   thread_identifier_info_data_t     m_ident_info;
   mach_msg_type_number_t            count = THREAD_IDENTIFIER_INFO_COUNT;
 
-  thread_info(::mach_thread_self(), THREAD_IDENTIFIER_INFO,
+  thread_info(mach_thread_port, THREAD_IDENTIFIER_INFO,
               (thread_info_t) &m_ident_info, &count);
+
   return m_ident_info.thread_id;
 }
 #endif
@@ -679,9 +680,14 @@
   }
 
 #ifdef __APPLE__
-  // thread_id is mach thread on macos
-  osthread->set_thread_id(::mach_thread_self());
-  osthread->set_unique_thread_id(locate_unique_thread_id());
+  // thread_id is mach thread on macos, which pthreads graciously caches and provides for us
+  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
+  guarantee(thread_id != 0, "thread id missing from pthreads");
+  osthread->set_thread_id(thread_id);
+
+  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
+  guarantee(unique_thread_id != 0, "unique thread id was not found");
+  osthread->set_unique_thread_id(unique_thread_id);
 #else
   // thread_id is pthread_id on BSD
   osthread->set_thread_id(::pthread_self());
@@ -843,8 +849,14 @@
 
   // Store pthread info into the OSThread
 #ifdef __APPLE__
-  osthread->set_thread_id(::mach_thread_self());
-  osthread->set_unique_thread_id(locate_unique_thread_id());
+  // thread_id is mach thread on macos, which pthreads graciously caches and provides for us
+  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
+  guarantee(thread_id != 0, "just checking");
+  osthread->set_thread_id(thread_id);
+
+  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
+  guarantee(unique_thread_id != 0, "just checking");
+  osthread->set_unique_thread_id(unique_thread_id);
 #else
   osthread->set_thread_id(::pthread_self());
 #endif
@@ -1115,7 +1127,7 @@
 
 intx os::current_thread_id() {
 #ifdef __APPLE__
-  return (intx)::mach_thread_self();
+  return (intx)::pthread_mach_thread_np(::pthread_self());
 #else
   return (intx)::pthread_self();
 #endif
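
A side note on the API swap above (an explanatory sketch, not part of the changeset): mach_thread_self() allocates a new send right on every call, which the caller is expected to release, while pthread_mach_thread_np() returns the port that pthreads already caches for the thread. A macOS-only sketch of the two patterns:

    #include <mach/mach.h>
    #include <pthread.h>

    void with_raw_mach_call() {
      mach_port_t port = mach_thread_self();          // allocates a new send right each call
      // ... use the port, e.g. for thread_info() ...
      mach_port_deallocate(mach_task_self(), port);   // must be released or port rights accumulate
    }

    mach_port_t with_pthreads_cache() {
      // pthreads already holds the thread's kernel port; nothing new is allocated,
      // so the call is cheap to repeat and there is nothing to deallocate.
      return pthread_mach_thread_np(pthread_self());
    }
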
@@ -2313,7 +2325,9 @@
 }
 
 
-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  fatal("This code is not used or maintained.");
+
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
@@ -3275,11 +3289,15 @@
     // and if UserSignalHandler is installed all bets are off
     if (CheckJNICalls) {
       if (libjsig_is_loaded) {
-        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
+        if (PrintJNIResolving) {
+          tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
+        }
         check_signals = false;
       }
       if (AllowUserSignalHandlers) {
-        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
+        if (PrintJNIResolving) {
+          tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
+        }
         check_signals = false;
       }
     }
@@ -4736,3 +4754,8 @@
   return n;
 }
 
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
--- a/hotspot/src/os/linux/vm/globals_linux.hpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/os/linux/vm/globals_linux.hpp	Fri Aug 30 00:19:42 2013 -0700
@@ -40,6 +40,9 @@
   product(bool, UseHugeTLBFS, false,                                    \
           "Use MAP_HUGETLB for large pages")                            \
                                                                         \
+  product(bool, UseTransparentHugePages, false,                         \
+          "Use MADV_HUGEPAGE for large pages")                          \
+                                                                        \
   product(bool, LoadExecStackDllInVMThread, true,                       \
           "Load DLLs with executable-stack attribute in the VM Thread") \
                                                                         \
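
At the OS level, the new UseTransparentHugePages flag corresponds to the madvise(MADV_HUGEPAGE) path used later in this changeset. A minimal Linux-only sketch (illustrative, assuming a kernel with transparent huge page support) of that pattern:

    #include <sys/mman.h>
    #include <cstddef>

    void* reserve_with_thp_hint(std::size_t bytes) {
      void* p = mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return nullptr;
      // Advisory only: the kernel may back the range with huge pages when it can.
      (void) madvise(p, bytes, MADV_HUGEPAGE);
      return p;
    }
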
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -2720,36 +2720,7 @@
 
 int os::Linux::commit_memory_impl(char* addr, size_t size,
                                   size_t alignment_hint, bool exec) {
-  int err;
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
-    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
-    uintptr_t res =
-      (uintptr_t) ::mmap(addr, size, prot,
-                         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
-                         -1, 0);
-    if (res != (uintptr_t) MAP_FAILED) {
-      if (UseNUMAInterleaving) {
-        numa_make_global(addr, size);
-      }
-      return 0;
-    }
-
-    err = errno;  // save errno from mmap() call above
-
-    if (!recoverable_mmap_error(err)) {
-      // However, it is not clear that this loss of our reserved mapping
-      // happens with large pages on Linux or that we cannot recover
-      // from the loss. For now, we just issue a warning and we don't
-      // call vm_exit_out_of_memory(). This issue is being tracked by
-      // JBS-8007074.
-      warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
-//    vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
-//                          "committing reserved memory.");
-    }
-    // Fall through and try to use small pages
-  }
-
-  err = os::Linux::commit_memory_impl(addr, size, exec);
+  int err = os::Linux::commit_memory_impl(addr, size, exec);
   if (err == 0) {
     realign_memory(addr, size, alignment_hint);
   }
@@ -2774,7 +2745,7 @@
 }
 
 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
+  if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
     // be supported or the memory may already be backed by huge pages.
     ::madvise(addr, bytes, MADV_HUGEPAGE);
@@ -2787,7 +2758,7 @@
   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
   // small pages on top of the SHM segment. This method always works for small pages, so we
   // allow that in any case.
-  if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
+  if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
     commit_memory(addr, bytes, alignment_hint, !ExecMem);
   }
 }
@@ -3157,11 +3128,31 @@
   return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
 }
 
+bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
+  bool result = false;
+  void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
+                 MAP_ANONYMOUS|MAP_PRIVATE,
+                 -1, 0);
+  if (p != MAP_FAILED) {
+    void *aligned_p = align_ptr_up(p, page_size);
+
+    result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
+
+    munmap(p, page_size * 2);
+  }
+
+  if (warn && !result) {
+    warning("TransparentHugePages is not supported by the operating system.");
+  }
+
+  return result;
+}
+
 bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
   bool result = false;
-  void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
-                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
-                  -1, 0);
+  void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
+                 MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
+                 -1, 0);
 
   if (p != MAP_FAILED) {
     // We don't know if this really is a huge page or not.
@@ -3182,12 +3173,10 @@
       }
       fclose(fp);
     }
-    munmap (p, page_size);
-    if (result)
-      return true;
-  }
-
-  if (warn) {
+    munmap(p, page_size);
+  }
+
+  if (warn && !result) {
     warning("HugeTLBFS is not supported by the operating system.");
   }
 
@@ -3235,82 +3224,114 @@
 
 static size_t _large_page_size = 0;
 
-void os::large_page_init() {
-  if (!UseLargePages) {
-    UseHugeTLBFS = false;
-    UseSHM = false;
-    return;
-  }
-
-  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
-    // If UseLargePages is specified on the command line try both methods,
-    // if it's default, then try only HugeTLBFS.
-    if (FLAG_IS_DEFAULT(UseLargePages)) {
-      UseHugeTLBFS = true;
-    } else {
-      UseHugeTLBFS = UseSHM = true;
-    }
-  }
-
-  if (LargePageSizeInBytes) {
-    _large_page_size = LargePageSizeInBytes;
-  } else {
-    // large_page_size on Linux is used to round up heap size. x86 uses either
-    // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
-    // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
-    // page as large as 256M.
-    //
-    // Here we try to figure out page size by parsing /proc/meminfo and looking
-    // for a line with the following format:
-    //    Hugepagesize:     2048 kB
-    //
-    // If we can't determine the value (e.g. /proc is not mounted, or the text
-    // format has been changed), we'll use the largest page size supported by
-    // the processor.
+size_t os::Linux::find_large_page_size() {
+  size_t large_page_size = 0;
+
+  // large_page_size on Linux is used to round up heap size. x86 uses either
+  // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
+  // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
+  // page as large as 256M.
+  //
+  // Here we try to figure out page size by parsing /proc/meminfo and looking
+  // for a line with the following format:
+  //    Hugepagesize:     2048 kB
+  //
+  // If we can't determine the value (e.g. /proc is not mounted, or the text
+  // format has been changed), we'll use the largest page size supported by
+  // the processor.
 
 #ifndef ZERO
-    _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
-                       ARM_ONLY(2 * M) PPC_ONLY(4 * M);
+  large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
+                     ARM_ONLY(2 * M) PPC_ONLY(4 * M);
 #endif // ZERO
 
-    FILE *fp = fopen("/proc/meminfo", "r");
-    if (fp) {
-      while (!feof(fp)) {
-        int x = 0;
-        char buf[16];
-        if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
-          if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
-            _large_page_size = x * K;
-            break;
-          }
-        } else {
-          // skip to next line
-          for (;;) {
-            int ch = fgetc(fp);
-            if (ch == EOF || ch == (int)'\n') break;
-          }
+  FILE *fp = fopen("/proc/meminfo", "r");
+  if (fp) {
+    while (!feof(fp)) {
+      int x = 0;
+      char buf[16];
+      if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
+        if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
+          large_page_size = x * K;
+          break;
+        }
+      } else {
+        // skip to next line
+        for (;;) {
+          int ch = fgetc(fp);
+          if (ch == EOF || ch == (int)'\n') break;
         }
       }
-      fclose(fp);
     }
-  }
-
-  // print a warning if any large page related flag is specified on command line
-  bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
-
+    fclose(fp);
+  }
+
+  if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
+    warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
+        SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
+        proper_unit_for_byte_size(large_page_size));
+  }
+
+  return large_page_size;
+}
+
+size_t os::Linux::setup_large_page_size() {
+  _large_page_size = Linux::find_large_page_size();
   const size_t default_page_size = (size_t)Linux::page_size();
   if (_large_page_size > default_page_size) {
     _page_sizes[0] = _large_page_size;
     _page_sizes[1] = default_page_size;
     _page_sizes[2] = 0;
   }
-  UseHugeTLBFS = UseHugeTLBFS &&
-                 Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
-
-  if (UseHugeTLBFS)
+
+  return _large_page_size;
+}
+
+bool os::Linux::setup_large_page_type(size_t page_size) {
+  if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
+      FLAG_IS_DEFAULT(UseSHM) &&
+      FLAG_IS_DEFAULT(UseTransparentHugePages)) {
+    // If UseLargePages is specified on the command line try all methods,
+    // if it's default, then try only UseTransparentHugePages.
+    if (FLAG_IS_DEFAULT(UseLargePages)) {
+      UseTransparentHugePages = true;
+    } else {
+      UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
+    }
+  }
+
+  if (UseTransparentHugePages) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
+    if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
+      UseHugeTLBFS = false;
+      UseSHM = false;
+      return true;
+    }
+    UseTransparentHugePages = false;
+  }
+
+  if (UseHugeTLBFS) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
+    if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
+      UseSHM = false;
+      return true;
+    }
+    UseHugeTLBFS = false;
+  }
+
+  return UseSHM;
+}
+
+void os::large_page_init() {
+  if (!UseLargePages) {
+    UseHugeTLBFS = false;
+    UseTransparentHugePages = false;
     UseSHM = false;
-
-  UseLargePages = UseHugeTLBFS || UseSHM;
+    return;
+  }
+
+  size_t large_page_size = Linux::setup_large_page_size();
+  UseLargePages          = Linux::setup_large_page_type(large_page_size);
 
   set_coredump_filter();
 }
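
For illustration (not the HotSpot code above), the /proc/meminfo probe that find_large_page_size() performs can be reduced to a standalone helper; the parsing assumes the documented "Hugepagesize:     2048 kB" line format:

    #include <cstdio>
    #include <cstddef>

    std::size_t huge_page_size_bytes() {
      FILE* fp = std::fopen("/proc/meminfo", "r");
      if (fp == nullptr) return 0;             // /proc not mounted, etc.
      char line[128];
      std::size_t result = 0;
      while (std::fgets(line, sizeof(line), fp) != nullptr) {
        long kb = 0;
        if (std::sscanf(line, "Hugepagesize: %ld kB", &kb) == 1) {
          result = (std::size_t) kb * 1024;
          break;
        }
      }
      std::fclose(fp);
      return result;
    }
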
@@ -3319,16 +3340,22 @@
 #define SHM_HUGETLB 04000
 #endif
 
-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }
 
   key_t key = IPC_PRIVATE;
   char *addr;
 
   bool warn_on_failure = UseLargePages &&
                         (!FLAG_IS_DEFAULT(UseLargePages) ||
+                         !FLAG_IS_DEFAULT(UseSHM) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                         );
   char msg[128];
@@ -3376,42 +3403,219 @@
      return NULL;
   }
 
-  if ((addr != NULL) && UseNUMAInterleaving) {
-    numa_make_global(addr, bytes);
-  }
-
-  // The memory is committed
-  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  return addr;
+}
+
+static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
+  assert(error == ENOMEM, "Only expect to fail if no memory is available");
+
+  bool warn_on_failure = UseLargePages &&
+      (!FLAG_IS_DEFAULT(UseLargePages) ||
+       !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
+       !FLAG_IS_DEFAULT(LargePageSizeInBytes));
+
+  if (warn_on_failure) {
+    char msg[128];
+    jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
+        PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
+    warning(msg);
+  }
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+  char* addr = (char*)::mmap(req_addr, bytes, prot,
+                             MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
+                             -1, 0);
+
+  if (addr == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    return NULL;
+  }
+
+  assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
 
   return addr;
 }
 
+char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  size_t large_page_size = os::large_page_size();
+
+  assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
+
+  // Allocate small pages.
+
+  char* start;
+  if (req_addr != NULL) {
+    assert(is_ptr_aligned(req_addr, alignment), "Must be");
+    assert(is_size_aligned(bytes, alignment), "Must be");
+    start = os::reserve_memory(bytes, req_addr);
+    assert(start == NULL || start == req_addr, "Must be");
+  } else {
+    start = os::reserve_memory_aligned(bytes, alignment);
+  }
+
+  if (start == NULL) {
+    return NULL;
+  }
+
+  assert(is_ptr_aligned(start, alignment), "Must be");
+
+  // os::reserve_memory_special will record this memory area.
+  // Need to release it here to prevent overlapping reservations.
+  MemTracker::record_virtual_memory_release((address)start, bytes);
+
+  char* end = start + bytes;
+
+  // Find the regions of the allocated chunk that can be promoted to large pages.
+  char* lp_start = (char*)align_ptr_up(start, large_page_size);
+  char* lp_end   = (char*)align_ptr_down(end, large_page_size);
+
+  size_t lp_bytes = lp_end - lp_start;
+
+  assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
+
+  if (lp_bytes == 0) {
+    // The mapped region doesn't even span the start and the end of a large page.
+    // Fall back to allocate a non-special area.
+    ::munmap(start, end - start);
+    return NULL;
+  }
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+
+
+  void* result;
+
+  if (start != lp_start) {
+    result = ::mmap(start, lp_start - start, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(lp_start, end - lp_start);
+      return NULL;
+    }
+  }
+
+  result = ::mmap(lp_start, lp_bytes, prot,
+                  MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
+                  -1, 0);
+  if (result == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    // If the mmap above fails, the large pages region will be unmapped and we
+    // have regions before and after with small pages. Release these regions.
+    //
+    // |  mapped  |  unmapped  |  mapped  |
+    // ^          ^            ^          ^
+    // start      lp_start     lp_end     end
+    //
+    ::munmap(start, lp_start - start);
+    ::munmap(lp_end, end - lp_end);
+    return NULL;
+  }
+
+  if (lp_end != end) {
+      result = ::mmap(lp_end, end - lp_end, prot,
+                      MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                      -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(start, lp_end - start);
+      return NULL;
+    }
+  }
+
+  return start;
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_ptr_aligned(req_addr, alignment), "Must be");
+  assert(is_power_of_2(alignment), "Must be");
+  assert(is_power_of_2(os::large_page_size()), "Must be");
+  assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
+
+  if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
+    return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
+  } else {
+    return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
+  }
+}
+
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  char* addr;
+  if (UseSHM) {
+    addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
+  }
+
+  if (addr != NULL) {
+    if (UseNUMAInterleaving) {
+      numa_make_global(addr, bytes);
+    }
+
+    // The memory is committed
+    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  }
+
+  return addr;
+}
+
+bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
+  // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
+  return shmdt(base) == 0;
+}
+
+bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
+  return pd_release_memory(base, bytes);
+}
+
 bool os::release_memory_special(char* base, size_t bytes) {
+  assert(UseLargePages, "only for large pages");
+
   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  // detaching the SHM segment will also delete it, see reserve_memory_special()
-  int rslt = shmdt(base);
-  if (rslt == 0) {
+
+  bool res;
+  if (UseSHM) {
+    res = os::Linux::release_memory_special_shm(base, bytes);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
+  }
+
+  if (res) {
     tkr.record((address)base, bytes);
-    return true;
   } else {
     tkr.discard();
-    return false;
-  }
+  }
+
+  return res;
 }
 
 size_t os::large_page_size() {
   return _large_page_size;
 }
 
-// HugeTLBFS allows application to commit large page memory on demand;
-// with SysV SHM the entire memory region must be allocated as shared
+// With SysV SHM the entire memory region must be allocated as shared
 // memory.
+// HugeTLBFS allows an application to commit large page memory on demand.
+// However, when committing memory with HugeTLBFS fails, the region
+// that was supposed to be committed will lose the old reservation
+// and allow other threads to steal that memory region. Because of this
+// behavior we can't commit HugeTLBFS memory.
 bool os::can_commit_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages;
 }
 
 bool os::can_execute_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages || UseHugeTLBFS;
 }
 
 // Reserve memory at an arbitrary address, only if that area is
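
The mixed small/large mapping above hinges on rounding the reservation's interior to large-page boundaries. A minimal sketch of such helpers (hypothetical names, assuming power-of-two alignments as the surrounding asserts require; HotSpot uses its own align_ptr_up/align_ptr_down):

    #include <cstddef>
    #include <cstdint>

    static inline char* align_up(char* p, std::size_t alignment) {
      return (char*) (((uintptr_t) p + alignment - 1) & ~(uintptr_t)(alignment - 1));
    }
    static inline char* align_down(char* p, std::size_t alignment) {
      return (char*) ((uintptr_t) p & ~(uintptr_t)(alignment - 1));
    }
    // Given [start, end) reserved with small pages, the part that can be remapped
    // with MAP_HUGETLB is [align_up(start, lp), align_down(end, lp)), which is how
    // lp_start/lp_end are computed in reserve_memory_special_huge_tlbfs_mixed().
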
@@ -4563,21 +4767,23 @@
         UseNUMA = false;
       }
     }
-    // With SHM large pages we cannot uncommit a page, so there's not way
+    // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
     // we can make the adaptive lgrp chunk resizing work. If the user specified
-    // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
+    // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
     // disable adaptive resizing.
-    if (UseNUMA && UseLargePages && UseSHM) {
-      if (!FLAG_IS_DEFAULT(UseNUMA)) {
-        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
+    if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
+      if (FLAG_IS_DEFAULT(UseNUMA)) {
+        UseNUMA = false;
+      } else {
+        if (FLAG_IS_DEFAULT(UseLargePages) &&
+            FLAG_IS_DEFAULT(UseSHM) &&
+            FLAG_IS_DEFAULT(UseHugeTLBFS)) {
           UseLargePages = false;
         } else {
-          warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
+          warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing");
           UseAdaptiveSizePolicy = false;
           UseAdaptiveNUMAChunkSizing = false;
         }
-      } else {
-        UseNUMA = false;
       }
     }
     if (!UseNUMA && ForceNUMA) {
@@ -5848,3 +6054,149 @@
 }
 
 #endif // JAVASE_EMBEDDED
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+  do {\
+    if (VerboseInternalVMTests) { \
+      tty->print_cr(__VA_ARGS__); \
+      tty->flush(); \
+    }\
+  } while (false)
+
+class TestReserveMemorySpecial : AllStatic {
+ public:
+  static void small_page_write(void* addr, size_t size) {
+    size_t page_size = os::vm_page_size();
+
+    char* end = (char*)addr + size;
+    for (char* p = (char*)addr; p < end; p += page_size) {
+      *p = 1;
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size);
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    size_t lp = os::large_page_size();
+
+    for (size_t size = lp; size <= lp * 10; size += lp) {
+      test_reserve_memory_special_huge_tlbfs_only(size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
+    if (!UseHugeTLBFS) {
+        return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
+        size, alignment);
+
+    assert(size >= os::large_page_size(), "Incorrect input to test");
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+      test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_reserve_memory_special_huge_tlbfs_only();
+    test_reserve_memory_special_huge_tlbfs_mixed();
+  }
+
+  static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
+    if (!UseSHM) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);
+
+    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      assert(is_ptr_aligned(addr, alignment), "Check");
+      assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
+
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_shm(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_shm() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t size = ag; size < lp * 3; size += ag) {
+      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+        test_reserve_memory_special_shm(size, alignment);
+      }
+    }
+  }
+
+  static void test() {
+    test_reserve_memory_special_huge_tlbfs();
+    test_reserve_memory_special_shm();
+  }
+};
+
+void TestReserveMemorySpecial_test() {
+  TestReserveMemorySpecial::test();
+}
+
+#endif
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Fri Aug 30 00:19:42 2013 -0700
@@ -32,6 +32,7 @@
 
 class Linux {
   friend class os;
+  friend class TestReserveMemorySpecial;
 
   // For signal-chaining
 #define MAXSIGNUM 32
@@ -92,8 +93,21 @@
   static void rebuild_cpu_to_node_map();
   static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }
 
+  static size_t find_large_page_size();
+  static size_t setup_large_page_size();
+
+  static bool setup_large_page_type(size_t page_size);
+  static bool transparent_huge_pages_sanity_check(bool warn, size_t pages_size);
   static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
 
+  static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
+
+  static bool release_memory_special_shm(char* base, size_t bytes);
+  static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
+
   static void print_full_memory_info(outputStream* st);
   static void print_distro_info(outputStream* st);
   static void print_libversion_info(outputStream* st);
--- a/hotspot/src/os/posix/vm/os_posix.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/os/posix/vm/os_posix.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -260,6 +260,55 @@
   return ::fdopen(fd, mode);
 }
 
+void* os::get_default_process_handle() {
+  return (void*)::dlopen(NULL, RTLD_LAZY);
+}
+
+// Builds a platform dependent Agent_OnLoad_<lib_name> function name
+// which is used to find statically linked in agents.
+// Parameters:
+//            sym_name: Symbol in library we are looking for
+//            lib_name: Name of library to look in, NULL for shared libs.
+//            is_absolute_path == true if lib_name is absolute path to agent
+//                                     such as "/a/b/libL.so"
+//            == false if only the base name of the library is passed in
+//               such as "L"
+char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
+                                    bool is_absolute_path) {
+  char *agent_entry_name;
+  size_t len;
+  size_t name_len;
+  size_t prefix_len = strlen(JNI_LIB_PREFIX);
+  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
+  const char *start;
+
+  if (lib_name != NULL) {
+    len = name_len = strlen(lib_name);
+    if (is_absolute_path) {
+      // Need to strip path, prefix and suffix
+      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
+        lib_name = ++start;
+      }
+      if (len <= (prefix_len + suffix_len)) {
+        return NULL;
+      }
+      lib_name += prefix_len;
+      name_len = strlen(lib_name) - suffix_len;
+    }
+  }
+  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
+  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
+  if (agent_entry_name == NULL) {
+    return NULL;
+  }
+  strcpy(agent_entry_name, sym_name);
+  if (lib_name != NULL) {
+    strcat(agent_entry_name, "_");
+    strncat(agent_entry_name, lib_name, name_len);
+  }
+  return agent_entry_name;
+}
+
 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
 }
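
To make the naming convention above concrete, here is a standalone sketch (not the HotSpot function) of how an absolute agent path maps to the probe symbol; it assumes the "lib" prefix and ".so" suffix from the comment, so "/a/b/libL.so" with "Agent_OnLoad" yields "Agent_OnLoad_L":

    #include <string>
    #include <cstddef>

    std::string agent_probe_name(const char* sym_name, const char* lib_path) {
      std::string base(lib_path);
      std::size_t slash = base.find_last_of('/');
      if (slash != std::string::npos) base.erase(0, slash + 1);   // strip directories
      if (base.compare(0, 3, "lib") == 0) base.erase(0, 3);       // strip "lib" prefix
      std::size_t dot = base.rfind(".so");
      if (dot != std::string::npos) base.erase(dot);              // strip ".so" suffix
      return std::string(sym_name) + "_" + base;                  // e.g. "Agent_OnLoad_L"
    }

The Windows variant later in this changeset additionally folds any __stdcall "@NN" decoration back onto the generated name.
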
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -3385,7 +3385,7 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
   fatal("os::reserve_memory_special should not be called on Solaris.");
   return NULL;
 }
@@ -6601,3 +6601,9 @@
 
   return strlen(buffer);
 }
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -3156,7 +3156,12 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }
 
   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
@@ -5394,6 +5399,75 @@
   return true;
 }
 
+void* os::get_default_process_handle() {
+  return (void*)GetModuleHandle(NULL);
+}
+
+// Builds a platform dependent Agent_OnLoad_<lib_name> function name
+// which is used to find statically linked in agents.
+// Additionally for windows, takes into account __stdcall names.
+// Parameters:
+//            sym_name: Symbol in library we are looking for
+//            lib_name: Name of library to look in, NULL for shared libs.
+//            is_absolute_path == true if lib_name is absolute path to agent
+//                                     such as "C:/a/b/L.dll"
+//            == false if only the base name of the library is passed in
+//               such as "L"
+char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
+                                    bool is_absolute_path) {
+  char *agent_entry_name;
+  size_t len;
+  size_t name_len;
+  size_t prefix_len = strlen(JNI_LIB_PREFIX);
+  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
+  const char *start;
+
+  if (lib_name != NULL) {
+    len = name_len = strlen(lib_name);
+    if (is_absolute_path) {
+      // Need to strip path, prefix and suffix
+      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
+        lib_name = ++start;
+      } else {
+        // Need to check for C:
+        if ((start = strchr(lib_name, ':')) != NULL) {
+          lib_name = ++start;
+        }
+      }
+      if (len <= (prefix_len + suffix_len)) {
+        return NULL;
+      }
+      lib_name += prefix_len;
+      name_len = strlen(lib_name) - suffix_len;
+    }
+  }
+  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
+  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
+  if (agent_entry_name == NULL) {
+    return NULL;
+  }
+  if (lib_name != NULL) {
+    const char *p = strrchr(sym_name, '@');
+    if (p != NULL && p != sym_name) {
+      // sym_name == _Agent_OnLoad@XX
+      strncpy(agent_entry_name, sym_name, (p - sym_name));
+      agent_entry_name[(p-sym_name)] = '\0';
+      // agent_entry_name == _Agent_OnLoad
+      strcat(agent_entry_name, "_");
+      strncat(agent_entry_name, lib_name, name_len);
+      strcat(agent_entry_name, p);
+      // agent_entry_name == _Agent_OnLoad_lib_name@XX
+    } else {
+      strcpy(agent_entry_name, sym_name);
+      strcat(agent_entry_name, "_");
+      strncat(agent_entry_name, lib_name, name_len);
+    }
+  } else {
+    strcpy(agent_entry_name, sym_name);
+  }
+  return agent_entry_name;
+}
+
 #else
 // Kernel32 API
 typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
@@ -5638,3 +5712,9 @@
 }
 
 #endif
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -3460,7 +3460,9 @@
 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
   assert_locked_or_safepoint(Heap_lock);
   size_t size = ReservedSpace::page_align_size_down(bytes);
-  if (size > 0) {
+  // Only shrink if a compaction was done so that all the free space
+  // in the generation is in a contiguous block at the end.
+  if (size > 0 && did_compact()) {
     shrink_by(size);
   }
 }
@@ -8696,9 +8698,10 @@
   assert(inFreeRange(), "Should only be called if currently in a free range.");
   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
   assert(_sp->used_region().contains(eob - 1),
-         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
+         err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
+                 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
-                 _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
+                 eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
   if (eob >= _limit) {
     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
     if (CMSTraceSweeper) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -981,7 +981,8 @@
 
     if (should_try_gc) {
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded,
+          GCCause::_g1_inc_collection_pause);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;
@@ -1106,7 +1107,8 @@
       // enough space for the allocation to succeed after the pause.
 
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded,
+          GCCause::_g1_humongous_allocation);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;
@@ -2006,10 +2008,12 @@
 
   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
   size_t max_byte_size = collector_policy()->max_heap_byte_size();
+  size_t heap_alignment = collector_policy()->max_alignment();
 
   // Ensure that the sizes are properly aligned.
   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
+  Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
 
   _cg1r = new ConcurrentG1Refine(this);
 
@@ -2026,12 +2030,8 @@
   // If this happens then we could end up using a non-optimal
   // compressed oops mode.
 
-  // Since max_byte_size is aligned to the size of a heap region (checked
-  // above).
-  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
-
   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
-                                                 HeapRegion::GrainBytes);
+                                                 heap_alignment);
 
   // It is important to do this in a way such that concurrent readers can't
   // temporarily think something is in the heap.  (I've actually seen this
@@ -3700,14 +3700,15 @@
 
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                                unsigned int gc_count_before,
-                                               bool* succeeded) {
+                                               bool* succeeded,
+                                               GCCause::Cause gc_cause) {
   assert_heap_not_locked_and_not_at_safepoint();
   g1_policy()->record_stop_world_start();
   VM_G1IncCollectionPause op(gc_count_before,
                              word_size,
                              false, /* should_initiate_conc_mark */
                              g1_policy()->max_pause_time_ms(),
-                             GCCause::_g1_inc_collection_pause);
+                             gc_cause);
   VMThread::execute(&op);
 
   HeapWord* result = op.result();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Aug 30 00:19:42 2013 -0700
@@ -776,9 +776,10 @@
   // it has to be read while holding the Heap_lock. Currently, both
   // methods that call do_collection_pause() release the Heap_lock
   // before the call, so it's easy to read gc_count_before just before.
-  HeapWord* do_collection_pause(size_t       word_size,
-                                unsigned int gc_count_before,
-                                bool*        succeeded);
+  HeapWord* do_collection_pause(size_t         word_size,
+                                unsigned int   gc_count_before,
+                                bool*          succeeded,
+                                GCCause::Cause gc_cause);
 
   // The guts of the incremental collection pause, executed by the vm
   // thread. It returns false if it is unable to do the collection due
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -313,7 +313,8 @@
 void G1CollectorPolicy::initialize_flags() {
   set_min_alignment(HeapRegion::GrainBytes);
   size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
-  set_max_alignment(MAX2(card_table_alignment, min_alignment()));
+  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+  set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
   if (SurvivorRatio < 1) {
     vm_exit_during_initialization("Invalid survivor ratio specified");
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -70,9 +70,6 @@
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",
                     target_pause_time_ms));
-  guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
-            "we can only request an allocation if the GC cause is for "
-            "an incremental GC pause");
   _gc_cause = gc_cause;
 }
 
--- a/hotspot/src/share/vm/memory/allocation.hpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Fri Aug 30 00:19:42 2013 -0700
@@ -666,7 +666,7 @@
   NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)
 
 #define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
-  (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail)
+  (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)
 
 #define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
   (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))
@@ -675,16 +675,16 @@
   (type*) (AllocateHeap((size) * sizeof(type), memflags))
 
 #define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
-  NEW_C_HEAP_ARRAY3(type, size, memflags, pc, AllocFailStrategy::RETURN_NULL)
+  NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)
 
 #define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
-  NEW_C_HEAP_ARRAY3(type, size, memflags, (address)0, AllocFailStrategy::RETURN_NULL)
+  NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
 
 #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
-  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))
+  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
 
 #define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
-   (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
+  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
 
 #define FREE_C_HEAP_ARRAY(type, old, memflags) \
   FreeHeap((char*)(old), memflags)
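
The added parentheses in the hunk above matter because the size and old arguments are often expressions such as s_len + 1; without them the expansion multiplies only the last operand by sizeof(type). A stand-alone sketch with simplified stand-in macros (not the HotSpot ones) showing the difference:

  #include <cstdio>
  #include <cstdlib>

  // Simplified stand-ins for NEW_C_HEAP_ARRAY-style macros, to show why the
  // size argument must be parenthesized before multiplying by sizeof(type).
  #define NEW_ARRAY_BAD(type, size)  ((type*)std::malloc(size * sizeof(type)))
  #define NEW_ARRAY_GOOD(type, size) ((type*)std::malloc((size) * sizeof(type)))

  int main() {
    size_t n = 10;
    // NEW_ARRAY_BAD(int, n + 1) expands to n + 1 * sizeof(int), i.e. only
    // sizeof(int) extra bytes instead of room for (n + 1) ints.
    std::printf("bad:  %zu bytes requested\n", n + 1 * sizeof(int));
    std::printf("good: %zu bytes requested\n", (n + 1) * sizeof(int));

    int* a = NEW_ARRAY_GOOD(int, n + 1);  // allocates (n + 1) * sizeof(int) bytes
    std::free(a);
    return 0;
  }
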
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -193,6 +193,8 @@
       alignment = lcm(os::large_page_size(), alignment);
   }
 
+  assert(alignment >= min_alignment(), "Must be");
+
   return alignment;
 }
 
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -95,13 +95,13 @@
   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 
   // The heap must be at least as aligned as generations.
-  size_t alignment = Generation::GenGrain;
+  size_t gen_alignment = Generation::GenGrain;
 
   _gen_specs = gen_policy()->generations();
 
   // Make sure the sizes are all aligned.
   for (i = 0; i < _n_gens; i++) {
-    _gen_specs[i]->align(alignment);
+    _gen_specs[i]->align(gen_alignment);
   }
 
   // Allocate space for the heap.
@@ -109,9 +109,11 @@
   char* heap_address;
   size_t total_reserved = 0;
   int n_covered_regions = 0;
-  ReservedSpace heap_rs(0);
+  ReservedSpace heap_rs;
 
-  heap_address = allocate(alignment, &total_reserved,
+  size_t heap_alignment = collector_policy()->max_alignment();
+
+  heap_address = allocate(heap_alignment, &total_reserved,
                           &n_covered_regions, &heap_rs);
 
   if (!heap_rs.is_reserved()) {
@@ -168,6 +170,8 @@
   const size_t pageSize = UseLargePages ?
       os::large_page_size() : os::vm_page_size();
 
+  assert(alignment % pageSize == 0, "Must be");
+
   for (int i = 0; i < _n_gens; i++) {
     total_reserved += _gen_specs[i]->max_size();
     if (total_reserved < _gen_specs[i]->max_size()) {
@@ -175,24 +179,17 @@
     }
     n_covered_regions += _gen_specs[i]->n_covered_regions();
   }
-  assert(total_reserved % pageSize == 0,
-         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
-                 SIZE_FORMAT, total_reserved, pageSize));
+  assert(total_reserved % alignment == 0,
+         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
+                 SIZE_FORMAT, total_reserved, alignment));
 
   // Needed until the cardtable is fixed to have the right number
   // of covered regions.
   n_covered_regions += 2;
 
-  if (UseLargePages) {
-    assert(total_reserved != 0, "total_reserved cannot be 0");
-    total_reserved = round_to(total_reserved, os::large_page_size());
-    if (total_reserved < os::large_page_size()) {
-      vm_exit_during_initialization(overflow_msg);
-    }
-  }
+  *_total_reserved = total_reserved;
+  *_n_covered_regions = n_covered_regions;
 
-      *_total_reserved = total_reserved;
-      *_n_covered_regions = n_covered_regions;
   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
   return heap_rs->base();
 }
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -345,7 +345,7 @@
 };
 
   // byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
+VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   // align up to vm allocation granularity
   byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
 
--- a/hotspot/src/share/vm/memory/universe.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -681,17 +681,23 @@
 // 32Gb
 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 
-char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
+  assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
+  assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
+  assert(is_size_aligned(heap_size, alignment), "Must be");
+
+  uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
+
   size_t base = 0;
 #ifdef _LP64
   if (UseCompressedOops) {
     assert(mode == UnscaledNarrowOop  ||
            mode == ZeroBasedNarrowOop ||
            mode == HeapBasedNarrowOop, "mode is invalid");
-    const size_t total_size = heap_size + HeapBaseMinAddress;
+    const size_t total_size = heap_size + heap_base_min_address_aligned;
     // Return specified base for the first request.
     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
-      base = HeapBaseMinAddress;
+      base = heap_base_min_address_aligned;
 
     // If the total size is small enough to allow UnscaledNarrowOop then
     // just use UnscaledNarrowOop.
@@ -742,6 +748,8 @@
     }
   }
 #endif
+
+  assert(is_ptr_aligned((char*)base, alignment), "Must be");
   return (char*)base; // also return NULL (don't care) for 32-bit VM
 }
 
@@ -867,27 +875,33 @@
   size_t total_reserved = align_size_up(heap_size, alignment);
   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
       "heap size is too big for compressed oops");
-  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 
-  ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
+  bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
+  assert(!UseLargePages
+      || UseParallelOldGC
+      || use_large_pages, "Wrong alignment to use large pages");
+
+  char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
+
+  ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
 
   if (UseCompressedOops) {
     if (addr != NULL && !total_rs.is_reserved()) {
       // Failed to reserve at specified address - the requested memory
       // region is taken already, for example, by 'java' launcher.
       // Try again to reserve heap higher.
-      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+      addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
 
       ReservedHeapSpace total_rs0(total_reserved, alignment,
-                                  UseLargePages, addr);
+          use_large_pages, addr);
 
       if (addr != NULL && !total_rs0.is_reserved()) {
         // Failed to reserve at specified address again - give up.
-        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+        addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
         assert(addr == NULL, "");
 
         ReservedHeapSpace total_rs1(total_reserved, alignment,
-                                    UseLargePages, addr);
+            use_large_pages, addr);
         total_rs = total_rs1;
       } else {
         total_rs = total_rs0;
--- a/hotspot/src/share/vm/memory/universe.hpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/universe.hpp	Fri Aug 30 00:19:42 2013 -0700
@@ -346,7 +346,7 @@
   };
   static NARROW_OOP_MODE narrow_oop_mode();
   static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode);
-  static char*    preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode);
+  static char*    preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode);
   static char*    preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode);
   static address  narrow_oop_base()                       { return  _narrow_oop._base; }
   static bool  is_narrow_oop_base(void* addr)             { return (narrow_oop_base() == (address)addr); }
--- a/hotspot/src/share/vm/prims/jni.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -3234,19 +3234,22 @@
  HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(
                                   env, string, (uintptr_t *) isCopy);
 #endif /* USDT2 */
-  //%note jni_5
-  if (isCopy != NULL) {
-    *isCopy = JNI_TRUE;
-  }
   oop s = JNIHandles::resolve_non_null(string);
   int s_len = java_lang_String::length(s);
   typeArrayOop s_value = java_lang_String::value(s);
   int s_offset = java_lang_String::offset(s);
-  jchar* buf = NEW_C_HEAP_ARRAY(jchar, s_len + 1, mtInternal);  // add one for zero termination
-  if (s_len > 0) {
-    memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
+  jchar* buf = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal);  // add one for zero termination
+  /* JNI Specification states return NULL on OOM */
+  if (buf != NULL) {
+    if (s_len > 0) {
+      memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
+    }
+    buf[s_len] = 0;
+    //%note jni_5
+    if (isCopy != NULL) {
+      *isCopy = JNI_TRUE;
+    }
   }
-  buf[s_len] = 0;
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStringChars__return, buf);
 #else /* USDT2 */
@@ -3335,9 +3338,14 @@
 #endif /* USDT2 */
   oop java_string = JNIHandles::resolve_non_null(string);
   size_t length = java_lang_String::utf8_length(java_string);
-  char* result = AllocateHeap(length + 1, mtInternal);
-  java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
-  if (isCopy != NULL) *isCopy = JNI_TRUE;
+  /* JNI Specification states return NULL on OOM */
+  char* result = AllocateHeap(length + 1, mtInternal, 0, AllocFailStrategy::RETURN_NULL);
+  if (result != NULL) {
+    java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
+    if (isCopy != NULL) {
+      *isCopy = JNI_TRUE;
+    }
+  }
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStringUTFChars__return, result);
 #else /* USDT2 */
@@ -3591,11 +3599,16 @@
      * Avoid asserts in typeArrayOop. */ \
     result = (ElementType*)get_bad_address(); \
   } else { \
-    result = NEW_C_HEAP_ARRAY(ElementType, len, mtInternal); \
-    /* copy the array to the c chunk */ \
-    memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \
+    /* JNI Specification states return NULL on OOM */                    \
+    result = NEW_C_HEAP_ARRAY_RETURN_NULL(ElementType, len, mtInternal); \
+    if (result != NULL) {                                                \
+      /* copy the array to the c chunk */                                \
+      memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len);      \
+      if (isCopy) {                                                      \
+        *isCopy = JNI_TRUE;                                              \
+      }                                                                  \
+    }                                                                    \
   } \
-  if (isCopy) *isCopy = JNI_TRUE; \
   DTRACE_PROBE1(hotspot_jni, Get##Result##ArrayElements__return, result);\
   return result; \
 JNI_END
@@ -3628,11 +3641,16 @@
      * Avoid asserts in typeArrayOop. */ \
     result = (ElementType*)get_bad_address(); \
   } else { \
-    result = NEW_C_HEAP_ARRAY(ElementType, len, mtInternal); \
-    /* copy the array to the c chunk */ \
-    memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \
+    /* JNI Specification states return NULL on OOM */                    \
+    result = NEW_C_HEAP_ARRAY_RETURN_NULL(ElementType, len, mtInternal); \
+    if (result != NULL) {                                                \
+      /* copy the array to the c chunk */                                \
+      memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len);      \
+      if (isCopy) {                                                      \
+        *isCopy = JNI_TRUE;                                              \
+      }                                                                  \
+    }                                                                    \
   } \
-  if (isCopy) *isCopy = JNI_TRUE; \
   ReturnProbe; \
   return result; \
 JNI_END
@@ -5027,9 +5045,15 @@
   tty->print_cr("Running test: " #unit_test_function_call); \
   unit_test_function_call
 
+// Forward declaration
+void TestReservedSpace_test();
+void TestReserveMemorySpecial_test();
+
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
     tty->print_cr("Running internal VM tests");
+    run_unit_test(TestReservedSpace_test());
+    run_unit_test(TestReserveMemorySpecial_test());
     run_unit_test(GlobalDefinitions::test_globals());
     run_unit_test(GCTimerAllTest::all());
     run_unit_test(arrayOopDesc::test_max_array_length());
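
With the jni.cpp changes above, GetStringChars, GetStringUTFChars and the Get<Type>ArrayElements family return NULL instead of aborting the VM when the copy cannot be allocated, so native callers should check the result. A minimal sketch of such a caller (the Java class and method names are hypothetical):

  #include <jni.h>
  #include <cstdio>

  // Native implementation of a hypothetical Example.printString(String) method.
  extern "C" JNIEXPORT void JNICALL
  Java_Example_printString(JNIEnv* env, jclass, jstring js) {
    const char* utf = env->GetStringUTFChars(js, NULL);
    if (utf == NULL) {
      // Copy could not be allocated; per the JNI spec NULL signals failure.
      return;
    }
    std::printf("%s\n", utf);
    env->ReleaseStringUTFChars(js, utf);
  }
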
--- a/hotspot/src/share/vm/prims/jvmti.xml	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvmti.xml	Fri Aug 30 00:19:42 2013 -0700
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="ISO-8859-1"?>
 <?xml-stylesheet type="text/xsl" href="jvmti.xsl"?>
 <!--
- Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -358,7 +358,7 @@
 <specification label="JVM(TM) Tool Interface"
         majorversion="1"
         minorversion="2"
-        microversion="2">
+        microversion="3">
   <title subtitle="Version">
     <tm>JVM</tm> Tool Interface
   </title>
@@ -431,12 +431,46 @@
     On the <tm>Solaris</tm> Operating Environment, an agent library is a shared
     object (<code>.so</code> file).
     <p/>
+
     An agent may be started at VM startup by specifying the agent library
     name using a <internallink id="starting">command line option</internallink>.
     Some implementations may support a mechanism to <internallink id="onattach"> 
     start agents</internallink> in the live <functionlink id="GetPhase">phase</functionlink>.
     The details of how this is initiated are implementation specific.
   </intro>
+
+    <intro id="entry point" label="Statically Linked Agents (since version 1.2.3)">
+
+      A native JVMTI Agent may be <i>statically linked</i> with the VM.
+      The manner in which the library and VM image are combined is
+      implementation-dependent.
+      An agent L whose image has been combined with the VM is defined as
+      <i>statically linked</i> if and only if the agent exports a function
+      called Agent_OnLoad_L.
+<p/>
+      If a <i>statically linked</i> agent L exports a function called
+      Agent_OnLoad_L and a function called Agent_OnLoad, the Agent_OnLoad
+      function will be ignored.
+      If an agent L is <i>statically linked</i>, an Agent_OnLoad_L
+      function will be invoked with the same arguments and expected return
+      value as specified for the Agent_OnLoad function.
+      An agent L that is <i>statically linked</i> will prohibit an agent of
+      the same name from being loaded dynamically.
+<p/>
+      The VM will invoke the Agent_OnUnload_L function of the agent, if such
+      a function is exported, at the same point during startup as it would
+      have called the dynamic entry point Agent_OnUnload.
+      If a <i>statically linked</i> agent L exports a function called
+      Agent_OnUnload_L and a function called Agent_OnUnload, the Agent_OnUnload
+      function will be ignored.
+<p/>
+      If an agent L is <i>statically linked</i>, an Agent_OnAttach_L function
+      will be invoked with the same arguments and expected return value as
+      specified for the Agent_OnAttach function.
+      If a <i>statically linked</i> agent L exports a function called
+      Agent_OnAttach_L and a function called Agent_OnAttach, the Agent_OnAttach
+      function will be ignored.
+</intro>
   
   <intro id="starting" label="Agent Command Line Options">
     The term "command-line option" is used below to
@@ -455,7 +489,7 @@
       <dd>
 	The name following <code>-agentlib:</code> is the name of the
 	library to load.  Lookup of the library, both its full name and location,
-	proceeds in a platform-specific manner. 
+	proceeds in a platform-specific manner.
 	Typically, the <i>&lt;agent-lib-name&gt;</i> is expanded to an
 	operating system specific file name.
 	The <i>&lt;options&gt;</i> will be passed to the agent on start-up.
@@ -463,7 +497,11 @@
 	<code>-agentlib:foo=opt1,opt2</code> is specified, the VM will attempt to 
 	load the shared library <code>foo.dll</code> from the system <code>PATH</code>
         under <tm>Windows</tm> or <code>libfoo.so</code> from the 
-	<code>LD_LIBRARY_PATH</code> under the <tm>Solaris</tm> operating environment.
+	<code>LD_LIBRARY_PATH</code> under the <tm>Solaris</tm> operating
+        environment.
+        If the agent library is statically linked into the executable
+        then no actual loading takes place.
+    <p/>
       </dd>
       <dt><code>-agentpath:</code><i>&lt;path-to-agent&gt;</i><code>=</code><i>&lt;options&gt;</i></dt>
       <dd>
@@ -473,11 +511,20 @@
 	The <i>&lt;options&gt;</i> will be passed to the agent on start-up.
 	For example, if the option 
 	<code>-agentpath:c:\myLibs\foo.dll=opt1,opt2</code> is specified, the VM will attempt to 
-	load the shared library <code>c:\myLibs\foo.dll</code>.
+	load the shared library <code>c:\myLibs\foo.dll</code>. If the agent
+        library is statically linked into the executable
+        then no actual loading takes place.
+    <p/>
       </dd>
     </dl>
-    The start-up routine <internallink id="onload"><code>Agent_OnLoad</code></internallink>
-    in the library will be invoked.
+    For a dynamic shared library agent, the start-up routine
+    <internallink id="onload"><code>Agent_OnLoad</code></internallink>
+    in the library will be invoked. If the agent library is statically linked
+    into the executable then the system will attempt to invoke the
+    <code>Agent_OnLoad_&lt;agent-lib-name&gt;</code> entry point where
+    &lt;agent-lib-name&gt; is the basename of the 
+    agent. In the above example <code>-agentpath:c:\myLibs\foo.dll=opt1,opt2</code>,
+    the system will attempt to find and call the <code>Agent_OnLoad_foo</code> start-up routine.
     <p/>
     Libraries loaded with <code>-agentlib:</code> or <code>-agentpath:</code>
     will be searched for JNI native method implementations to facilitate the
@@ -502,11 +549,13 @@
     If the agent is started in the <code>OnLoad</code>
     <functionlink id="GetPhase">phase</functionlink> the function
     <internallink id="onload"><code>Agent_OnLoad</code></internallink>
-    will be invoked.
+    or <internallink id="onload"><code>Agent_OnLoad_L</code></internallink>
+    for statically linked agents will be invoked.
     If the agent is started in the live
     <functionlink id="GetPhase">phase</functionlink> the function
     <internallink id="onattach"><code>Agent_OnAttach</code></internallink>
-    will be invoked.
+    or <internallink id="onattach"><code>Agent_OnAttach_L</code></internallink>
+    for statically linked agents will be invoked.
     Exactly one call to a start-up function is made per agent.  
   </intro>
 
@@ -516,6 +565,11 @@
     <example>
 JNIEXPORT jint JNICALL 
 Agent_OnLoad(JavaVM *vm, char *options, void *reserved)</example>
+    Or for a statically linked agent named 'L':
+    <example>
+JNIEXPORT jint JNICALL 
+Agent_OnLoad_L(JavaVM *vm, char *options, void *reserved)</example>
+
     The VM will start the agent by calling this function.  
     It will be called early enough in VM initialization that:
     <ul>
@@ -531,7 +585,8 @@
       <li>no objects have been created</li>
     </ul>
     <p/>
-    The VM will call the <code>Agent_OnLoad</code> function with
+    The VM will call the <code>Agent_OnLoad</code> or
+    <code>Agent_OnLoad_&lt;agent-lib-name&gt;</code> function with
     <i>&lt;options&gt;</i> as the second argument - 
     that is, using the command-line option examples,
     <code>"opt1,opt2"</code> will be passed to the <code>char *options</code> 
@@ -540,7 +595,8 @@
     <internallink id="mUTF">modified UTF-8</internallink> string.
     If <i>=&lt;options&gt;</i> is not specified, 
     a zero length string is passed to <code>options</code>.
-    The lifespan of the <code>options</code> string is the <code>Agent_OnLoad</code>
+    The lifespan of the <code>options</code> string is the
+    <code>Agent_OnLoad</code> or <code>Agent_OnLoad_&lt;agent-lib-name&gt;</code>
     call.  If needed beyond this time the string or parts of the string must
     be copied.
     The period between when <code>Agent_OnLoad</code> is called and when it
@@ -570,7 +626,8 @@
       their functionality.
     </rationale>
     <p/>
-    The return value from <code>Agent_OnLoad</code> is used to indicate an error.
+    The return value from <code>Agent_OnLoad</code> or
+    <code>Agent_OnLoad_&lt;agent-lib-name&gt;</code> is used to indicate an error.
     Any value other than zero indicates an error and causes termination of the VM.
   </intro>
   
@@ -587,6 +644,11 @@
     <example>
 JNIEXPORT jint JNICALL 
 Agent_OnAttach(JavaVM* vm, char *options, void *reserved)</example>
+Or for a statically linked agent named 'L':
+    <example>
+JNIEXPORT jint JNICALL 
+Agent_OnAttach_L(JavaVM* vm, char *options, void *reserved)</example>
+
     <p/>         
     The VM will start the agent by calling this function.  
     It will be called in the context of a thread
@@ -596,13 +658,14 @@
     </internallink> string.
     If startup options were not provided, a zero length string is passed to 
     <code>options</code>. The lifespan of the <code>options</code> string is the 
-    <code>Agent_OnAttach</code> call.  If needed beyond this time the string or parts of 
-    the string must be copied.
+    <code>Agent_OnAttach</code> or <code>Agent_OnAttach_&lt;agent-lib-name&gt;</code> call.
+    If needed beyond this time the string or parts of the string must be copied.
     <p/>
     Note that some <internallink id="capability">capabilities</internallink> 
     may not be available in the live phase.
     <p/>
-    The <code>Agent_OnAttach</code> function initializes the agent and returns a value
+    The <code>Agent_OnAttach</code> or <code>Agent_OnAttach_&lt;agent-lib-name
+    &gt;</code> function initializes the agent and returns a value
     to the VM to indicate if an error occurred. Any value other than zero indicates an error. 
     An error does not cause the VM to terminate. Instead the VM ignores the error, or takes 
     some implementation specific action -- for example it might print an error to standard error, 
@@ -615,8 +678,14 @@
     <example>
 JNIEXPORT void JNICALL 
 Agent_OnUnload(JavaVM *vm)</example>
+    Or for a statically linked agent named 'L':
+    <example>
+JNIEXPORT void JNICALL 
+Agent_OnUnload_L(JavaVM *vm)</example>
+
     This function will be called by the VM when the library is about to be unloaded.
-    The library will be unloaded and this function will be called if some platform specific 
+    The library will be unloaded (unless it is statically linked into the
+    executable) and this function will be called if some platform specific 
     mechanism causes the unload (an unload mechanism is not specified in this document)
     or the library is (in effect) unloaded by the termination of the VM whether through 
     normal termination or VM failure, including start-up failure.
@@ -625,8 +694,9 @@
     <eventlink id="VMDeath">VM Death event</eventlink>: for the VM Death event
     to be sent, the VM must have run at least to the point of initialization and a valid 
     <jvmti/> environment must exist which has set a callback for VMDeath
-    and enabled the event
-    None of these are required for <code>Agent_OnUnload</code> and this function
+    and enabled the event.
+    None of these are required for <code>Agent_OnUnload</code> or
+    <code>Agent_OnUnload_&lt;agent-lib-name&gt;</code> and this function
     is also called if the library is unloaded for other reasons.
     In the case that a VM Death event is sent, it will be sent before this 
     function is called (assuming this function is called due to VM termination).
@@ -10701,10 +10771,14 @@
           <constants id="jvmtiPhase" label="Phases of execution" kind="enum">
             <constant id="JVMTI_PHASE_ONLOAD" num="1">
               <code>OnLoad</code> phase: while in the
-              <internallink id="onload"><code>Agent_OnLoad</code></internallink> function.
+              <internallink id="onload"><code>Agent_OnLoad</code></internallink>
+              or, for statically linked agents, the <internallink id="onload">
+              <code>Agent_OnLoad_&lt;agent-lib-name&gt;
+              </code></internallink> function.
             </constant>
             <constant id="JVMTI_PHASE_PRIMORDIAL" num="2">
-              Primordial phase: between return from <code>Agent_OnLoad</code> and the
+              Primordial phase: between return from <code>Agent_OnLoad</code>
+              or <code>Agent_OnLoad_&lt;agent-lib-name&gt;</code> and the
               <code>VMStart</code> event.
             </constant>
             <constant id="JVMTI_PHASE_START" num="6">
@@ -14261,6 +14335,9 @@
   <change date="11 October 2012" version="1.2.2">
       Fixed the "HTTP" and "Missing Anchor" errors reported by the LinkCheck tool.
   </change>
+  <change date="19 June 2013" version="1.2.3">
+      Added support for statically linked agents.
+  </change>
 </changehistory>
 
 </specification>
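
The specification text added above defines decorated entry points for agents that are linked into the VM or launcher image. A minimal sketch of what a statically linked agent might export, assuming a hypothetical agent basename of foo (started with -agentlib:foo=opt1,opt2 even though no foo library is loaded from disk):

  #include <jvmti.h>
  #include <cstdio>

  // Decorated entry points for a statically linked agent named "foo",
  // following the Agent_OnLoad_L / Agent_OnAttach_L / Agent_OnUnload_L
  // convention described above. Illustrative only.
  extern "C" JNIEXPORT jint JNICALL
  Agent_OnLoad_foo(JavaVM* vm, char* options, void* reserved) {
    jvmtiEnv* jvmti = NULL;
    if (vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_2) != JNI_OK) {
      return JNI_ERR;  // any non-zero return terminates the VM
    }
    std::printf("Agent_OnLoad_foo options: %s\n", options != NULL ? options : "");
    return JNI_OK;
  }

  extern "C" JNIEXPORT jint JNICALL
  Agent_OnAttach_foo(JavaVM* vm, char* options, void* reserved) {
    return JNI_OK;  // live-phase attach entry point
  }

  extern "C" JNIEXPORT void JNICALL
  Agent_OnUnload_foo(JavaVM* vm) {
    // Called at VM shutdown; the image itself is never unloaded.
  }
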
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -2191,6 +2191,8 @@
   char buffer[JVM_MAXPATHLEN];
   void* library = NULL;
   jint result = JNI_ERR;
+  const char *on_attach_symbols[] = AGENT_ONATTACH_SYMBOLS;
+  size_t num_symbol_entries = ARRAY_SIZE(on_attach_symbols);
 
   // get agent name and options
   const char* agent = op->arg(0);
@@ -2200,43 +2202,48 @@
   // The abs parameter should be "true" or "false"
   bool is_absolute_path = (absParam != NULL) && (strcmp(absParam,"true")==0);
 
+  // Initially marked as invalid. It will be set to valid if we can find the agent
+  AgentLibrary *agent_lib = new AgentLibrary(agent, options, is_absolute_path, NULL);
 
-  // If the path is absolute we attempt to load the library. Otherwise we try to
-  // load it from the standard dll directory.
+  // Check for statically linked in agent. If not found then if the path is
+  // absolute we attempt to load the library. Otherwise we try to load it
+  // from the standard dll directory.
 
-  if (is_absolute_path) {
-    library = os::dll_load(agent, ebuf, sizeof ebuf);
-  } else {
-    // Try to load the agent from the standard dll directory
-    if (os::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
-                           agent)) {
-      library = os::dll_load(buffer, ebuf, sizeof ebuf);
-    }
-    if (library == NULL) {
-      // not found - try local path
-      char ns[1] = {0};
-      if (os::dll_build_name(buffer, sizeof(buffer), ns, agent)) {
+  if (!os::find_builtin_agent(agent_lib, on_attach_symbols, num_symbol_entries)) {
+    if (is_absolute_path) {
+      library = os::dll_load(agent, ebuf, sizeof ebuf);
+    } else {
+      // Try to load the agent from the standard dll directory
+      if (os::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
+                             agent)) {
         library = os::dll_load(buffer, ebuf, sizeof ebuf);
       }
+      if (library == NULL) {
+        // not found - try local path
+        char ns[1] = {0};
+        if (os::dll_build_name(buffer, sizeof(buffer), ns, agent)) {
+          library = os::dll_load(buffer, ebuf, sizeof ebuf);
+        }
+      }
     }
+    if (library != NULL) {
+      agent_lib->set_os_lib(library);
+      agent_lib->set_valid();
+    }
   }
-
   // If the library was loaded then we attempt to invoke the Agent_OnAttach
   // function
-  if (library != NULL) {
-
+  if (agent_lib->valid()) {
     // Lookup the Agent_OnAttach function
     OnAttachEntry_t on_attach_entry = NULL;
-    const char *on_attach_symbols[] = AGENT_ONATTACH_SYMBOLS;
-    for (uint symbol_index = 0; symbol_index < ARRAY_SIZE(on_attach_symbols); symbol_index++) {
-      on_attach_entry =
-        CAST_TO_FN_PTR(OnAttachEntry_t, os::dll_lookup(library, on_attach_symbols[symbol_index]));
-      if (on_attach_entry != NULL) break;
-    }
-
+    on_attach_entry = CAST_TO_FN_PTR(OnAttachEntry_t,
+       os::find_agent_function(agent_lib, false, on_attach_symbols, num_symbol_entries));
     if (on_attach_entry == NULL) {
       // Agent_OnAttach missing - unload library
-      os::dll_unload(library);
+      if (!agent_lib->is_static_lib()) {
+        os::dll_unload(library);
+      }
+      delete agent_lib;
     } else {
       // Invoke the Agent_OnAttach function
       JavaThread* THREAD = JavaThread::current();
@@ -2256,7 +2263,9 @@
       // If OnAttach returns JNI_OK then we add it to the list of
       // agent libraries so that we can call Agent_OnUnload later.
       if (result == JNI_OK) {
-        Arguments::add_loaded_agent(agent, (char*)options, is_absolute_path, library);
+        Arguments::add_loaded_agent(agent_lib);
+      } else {
+        delete agent_lib;
       }
 
       // Agent_OnAttach executed so completion status is JNI_OK
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -1554,18 +1554,22 @@
     return false;
   }
 
-  // rewrite sourc file name index:
+  // rewrite source file name index:
   u2 source_file_name_idx = scratch_class->source_file_name_index();
   if (source_file_name_idx != 0) {
     u2 new_source_file_name_idx = find_new_index(source_file_name_idx);
-    scratch_class->set_source_file_name_index(new_source_file_name_idx);
+    if (new_source_file_name_idx != 0) {
+      scratch_class->set_source_file_name_index(new_source_file_name_idx);
+    }
   }
 
   // rewrite class generic signature index:
   u2 generic_signature_index = scratch_class->generic_signature_index();
   if (generic_signature_index != 0) {
     u2 new_generic_signature_index = find_new_index(generic_signature_index);
-    scratch_class->set_generic_signature_index(new_generic_signature_index);
+    if (new_generic_signature_index != 0) {
+      scratch_class->set_generic_signature_index(new_generic_signature_index);
+    }
   }
 
   return true;
@@ -1737,7 +1741,10 @@
 
     for (int i = 0; i < len; i++) {
       const u2 cp_index = elem[i].name_cp_index;
-      elem[i].name_cp_index = find_new_index(cp_index);
+      const u2 new_cp_index = find_new_index(cp_index);
+      if (new_cp_index != 0) {
+        elem[i].name_cp_index = new_cp_index;
+      }
     }
   }
 } // end rewrite_cp_refs_in_method()
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -128,7 +128,7 @@
 WB_END
 #endif // INCLUDE_ALL_GCS
 
-#ifdef INCLUDE_NMT
+#if INCLUDE_NMT
 // Alloc memory using the test memory type so that we can use that to see if
 // NMT picks it up correctly
 WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
@@ -181,6 +181,10 @@
   return MemTracker::wbtest_wait_for_data_merge();
 WB_END
 
+WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
+  return MemTracker::tracking_level() == MemTracker::NMT_detail;
+WB_END
+
 #endif // INCLUDE_NMT
 
 static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@@ -439,7 +443,7 @@
   {CC"g1NumFreeRegions",   CC"()J",                   (void*)&WB_G1NumFreeRegions  },
   {CC"g1RegionSize",       CC"()I",                   (void*)&WB_G1RegionSize      },
 #endif // INCLUDE_ALL_GCS
-#ifdef INCLUDE_NMT
+#if INCLUDE_NMT
   {CC"NMTMalloc",           CC"(J)J",                 (void*)&WB_NMTMalloc          },
   {CC"NMTFree",             CC"(J)V",                 (void*)&WB_NMTFree            },
   {CC"NMTReserveMemory",    CC"(J)J",                 (void*)&WB_NMTReserveMemory   },
@@ -447,6 +451,7 @@
   {CC"NMTUncommitMemory",   CC"(JJ)V",                (void*)&WB_NMTUncommitMemory  },
   {CC"NMTReleaseMemory",    CC"(JJ)V",                (void*)&WB_NMTReleaseMemory   },
   {CC"NMTWaitForDataMerge", CC"()Z",                  (void*)&WB_NMTWaitForDataMerge},
+  {CC"NMTIsDetailSupported",CC"()Z",                  (void*)&WB_NMTIsDetailSupported},
 #endif // INCLUDE_NMT
   {CC"deoptimizeAll",      CC"()V",                   (void*)&WB_DeoptimizeAll     },
   {CC"deoptimizeMethod",   CC"(Ljava/lang/reflect/Executable;Z)I",
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Fri Aug 30 00:19:42 2013 -0700
@@ -118,11 +118,21 @@
 // For use by -agentlib, -agentpath and -Xrun
 class AgentLibrary : public CHeapObj<mtInternal> {
   friend class AgentLibraryList;
+public:
+  // Is this library valid or not. Don't rely on os_lib == NULL as statically
+  // linked lib could have handle of RTLD_DEFAULT which == 0 on some platforms
+  enum AgentState {
+    agent_invalid = 0,
+    agent_valid   = 1
+  };
+
  private:
   char*           _name;
   char*           _options;
   void*           _os_lib;
   bool            _is_absolute_path;
+  bool            _is_static_lib;
+  AgentState      _state;
   AgentLibrary*   _next;
 
  public:
@@ -133,6 +143,11 @@
   void* os_lib() const                      { return _os_lib; }
   void set_os_lib(void* os_lib)             { _os_lib = os_lib; }
   AgentLibrary* next() const                { return _next; }
+  bool is_static_lib() const                { return _is_static_lib; }
+  void set_static_lib(bool static_lib)      { _is_static_lib = static_lib; }
+  bool valid()                              { return (_state == agent_valid); }
+  void set_valid()                          { _state = agent_valid; }
+  void set_invalid()                        { _state = agent_invalid; }
 
   // Constructor
   AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib) {
@@ -147,6 +162,8 @@
     _is_absolute_path = is_absolute_path;
     _os_lib = os_lib;
     _next = NULL;
+    _state = agent_invalid;
+    _is_static_lib = false;
   }
 };
 
@@ -276,6 +293,8 @@
     { _agentList.add(new AgentLibrary(name, options, absolute_path, NULL)); }
 
   // Late-binding agents not started via arguments
+  static void add_loaded_agent(AgentLibrary *agentLib)
+    { _agentList.add(agentLib); }
   static void add_loaded_agent(const char* name, char* options, bool absolute_path, void* os_lib)
     { _agentList.add(new AgentLibrary(name, options, absolute_path, os_lib)); }
 
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri Aug 30 00:19:42 2013 -0700
@@ -1933,6 +1933,9 @@
   notproduct(bool, ExecuteInternalVMTests, false,                           \
           "Enable execution of internal VM tests.")                         \
                                                                             \
+  notproduct(bool, VerboseInternalVMTests, false,                           \
+          "Turn on logging for internal VM tests.")                         \
+                                                                            \
   product_pd(bool, UseTLAB, "Use thread-local object allocation")           \
                                                                             \
   product_pd(bool, ResizeTLAB,                                              \
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -124,13 +124,15 @@
 
 Mutex*   Management_lock              = NULL;
 Monitor* Service_lock                 = NULL;
-Mutex*   Stacktrace_lock              = NULL;
+Monitor* PeriodicTask_lock            = NULL;
 
-Monitor* JfrQuery_lock                = NULL;
+#ifdef INCLUDE_TRACE
+Mutex*   JfrStacktrace_lock           = NULL;
 Monitor* JfrMsg_lock                  = NULL;
 Mutex*   JfrBuffer_lock               = NULL;
 Mutex*   JfrStream_lock               = NULL;
-Monitor* PeriodicTask_lock            = NULL;
+Mutex*   JfrThreadGroups_lock         = NULL;
+#endif
 
 #define MAX_NUM_MUTEX 128
 static Monitor * _mutex_array[MAX_NUM_MUTEX];
@@ -206,7 +208,6 @@
   def(Patching_lock                , Mutex  , special,     true ); // used for safepointing and code patching.
   def(ObjAllocPost_lock            , Monitor, special,     false);
   def(Service_lock                 , Monitor, special,     true ); // used for service thread operations
-  def(Stacktrace_lock              , Mutex,   special,     true ); // used for JFR stacktrace database
   def(JmethodIdCreation_lock       , Mutex  , leaf,        true ); // used for creating jmethodIDs.
 
   def(SystemDictionary_lock        , Monitor, leaf,        true ); // lookups done by VM thread
@@ -272,11 +273,16 @@
   def(Debug3_lock                  , Mutex  , nonleaf+4,   true );
   def(ProfileVM_lock               , Monitor, special,   false); // used for profiling of the VMThread
   def(CompileThread_lock           , Monitor, nonleaf+5,   false );
+  def(PeriodicTask_lock            , Monitor, nonleaf+5,   true);
 
+#ifdef INCLUDE_TRACE
   def(JfrMsg_lock                  , Monitor, leaf,        true);
   def(JfrBuffer_lock               , Mutex,   nonleaf+1,   true);
+  def(JfrThreadGroups_lock         , Mutex,   nonleaf+1,   true);
   def(JfrStream_lock               , Mutex,   nonleaf+2,   true);
-  def(PeriodicTask_lock            , Monitor, nonleaf+5,   true);
+  def(JfrStacktrace_lock           , Mutex,   special,     true );
+#endif
+
 }
 
 GCMutexLocker::GCMutexLocker(Monitor * mutex) {
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp	Fri Aug 30 00:19:42 2013 -0700
@@ -137,13 +137,15 @@
 
 extern Mutex*   Management_lock;                 // a lock used to serialize JVM management
 extern Monitor* Service_lock;                    // a lock used for service thread operation
-extern Mutex*   Stacktrace_lock;                 // used to guard access to the stacktrace table
+extern Monitor* PeriodicTask_lock;               // protects the periodic task structure
 
-extern Monitor* JfrQuery_lock;                   // protects JFR use
+#ifdef INCLUDE_TRACE
+extern Mutex*   JfrStacktrace_lock;              // used to guard access to the JFR stacktrace table
 extern Monitor* JfrMsg_lock;                     // protects JFR messaging
 extern Mutex*   JfrBuffer_lock;                  // protects JFR buffer operations
 extern Mutex*   JfrStream_lock;                  // protects JFR stream access
-extern Monitor* PeriodicTask_lock;               // protects the periodic task structure
+extern Mutex*   JfrThreadGroups_lock;            // protects JFR access to Thread Groups
+#endif
 
 // A MutexLocker provides mutual exclusion with respect to a given mutex
 // for the scope which contains the locker.  The lock is an OS lock, not
--- a/hotspot/src/share/vm/runtime/os.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/os.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -443,6 +443,67 @@
   return _native_java_library;
 }
 
+/*
+ * Support for finding Agent_On(Un)Load/Attach<_lib_name> if it exists.
+ * If check_lib == true then we are looking for an
+ * Agent_OnLoad_lib_name or Agent_OnAttach_lib_name function to determine if
+ * this library is statically linked into the image.
+ * If check_lib == false then we will look for the appropriate symbol in the
+ * executable if agent_lib->is_static_lib() == true or in the shared library
+ * referenced by 'handle'.
+ */
+void* os::find_agent_function(AgentLibrary *agent_lib, bool check_lib,
+                              const char *syms[], size_t syms_len) {
+  const char *lib_name;
+  void *handle = agent_lib->os_lib();
+  void *entryName = NULL;
+  char *agent_function_name;
+  size_t i;
+
+  // If checking then use the agent name otherwise test is_static_lib() to
+  // see how to process this lookup
+  lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : NULL);
+  for (i = 0; i < syms_len; i++) {
+    agent_function_name = build_agent_function_name(syms[i], lib_name, agent_lib->is_absolute_path());
+    if (agent_function_name == NULL) {
+      break;
+    }
+    entryName = dll_lookup(handle, agent_function_name);
+    FREE_C_HEAP_ARRAY(char, agent_function_name, mtThread);
+    if (entryName != NULL) {
+      break;
+    }
+  }
+  return entryName;
+}
+
+// See if the passed in agent is statically linked into the VM image.
+bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
+                            size_t syms_len) {
+  void *ret;
+  void *proc_handle;
+  void *save_handle;
+
+  if (agent_lib->name() == NULL) {
+    return false;
+  }
+  proc_handle = get_default_process_handle();
+  // Check for Agent_OnLoad/Attach_lib_name function
+  save_handle = agent_lib->os_lib();
+  // We want to look in this process' symbol table.
+  agent_lib->set_os_lib(proc_handle);
+  ret = find_agent_function(agent_lib, true, syms, syms_len);
+  agent_lib->set_os_lib(save_handle);
+  if (ret != NULL) {
+    // Found an entry point like Agent_OnLoad_lib_name so we have a static agent
+    agent_lib->set_os_lib(proc_handle);
+    agent_lib->set_valid();
+    agent_lib->set_static_lib(true);
+    return true;
+  }
+  return false;
+}
+
 // --------------------- heap allocation utilities ---------------------
 
 char *os::strdup(const char *str, MEMFLAGS flags) {
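
The comment block above describes how the decorated Agent_* entry points are looked up either in the process image (for a statically linked agent) or in the loaded shared library. A POSIX-flavoured sketch of the same lookup pattern, not the HotSpot implementation: build_agent_function_name is platform specific, and the agent name foo is hypothetical (on Linux, link with -ldl where dlopen/dlsym are not in libc):

  #include <dlfcn.h>
  #include <cstdio>
  #include <string>

  // Look up "sym" or, when lib_name is given (static agent), "sym_<lib_name>".
  static void* lookup_agent_symbol(void* handle, const std::string& sym,
                                   const char* lib_name) {
    std::string name = sym;
    if (lib_name != NULL) {
      name += "_";
      name += lib_name;  // e.g. "Agent_OnAttach_foo"
    }
    return dlsym(handle, name.c_str());
  }

  int main() {
    // For a statically linked agent, the "library" is this process itself.
    void* proc = dlopen(NULL, RTLD_LAZY);
    void* entry = lookup_agent_symbol(proc, "Agent_OnAttach", "foo");
    std::printf("Agent_OnAttach_foo %s\n", entry != NULL ? "found" : "not found");
    return 0;
  }
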
--- a/hotspot/src/share/vm/runtime/os.hpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/os.hpp	Fri Aug 30 00:19:42 2013 -0700
@@ -46,6 +46,8 @@
 # include <setjmp.h>
 #endif
 
+class AgentLibrary;
+
 // os defines the interface to operating system; this includes traditional
 // OS services (time, I/O) as well as other functionality with system-
 // dependent code.
@@ -328,8 +330,8 @@
 
   static char*  non_memory_address_word();
   // reserve, commit and pin the entire memory region
-  static char*  reserve_memory_special(size_t size, char* addr = NULL,
-                bool executable = false);
+  static char*  reserve_memory_special(size_t size, size_t alignment,
+                                       char* addr, bool executable);
   static bool   release_memory_special(char* addr, size_t bytes);
   static void   large_page_init();
   static size_t large_page_size();
@@ -537,6 +539,17 @@
   // Unload library
   static void  dll_unload(void *lib);
 
+  // Return the handle of this process
+  static void* get_default_process_handle();
+
+  // Check for static linked agent library
+  static bool find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
+                                 size_t syms_len);
+
+  // Find agent entry point
+  static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib,
+                                   const char *syms[], size_t syms_len);
+
   // Print out system information; they are called by fatal error handler.
   // Output format may be different on different platforms.
   static void print_os_info(outputStream* st);
@@ -806,6 +819,11 @@
   // ResumeThread call)
   static void pause();
 
+  // Builds a platform dependent Agent_OnLoad_<libname> function name
+  // which is used to find statically linked in agents.
+  static char*  build_agent_function_name(const char *sym, const char *cname,
+                                          bool is_absolute_path);
+
   class SuspendedThreadTaskContext {
   public:
     SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -3696,15 +3696,18 @@
 // num_symbol_entries must be passed-in since only the caller knows the number of symbols in the array.
 static OnLoadEntry_t lookup_on_load(AgentLibrary* agent, const char *on_load_symbols[], size_t num_symbol_entries) {
   OnLoadEntry_t on_load_entry = NULL;
-  void *library = agent->os_lib();  // check if we have looked it up before
-
-  if (library == NULL) {
+  void *library = NULL;
+
+  if (!agent->valid()) {
     char buffer[JVM_MAXPATHLEN];
     char ebuf[1024];
     const char *name = agent->name();
     const char *msg = "Could not find agent library ";
 
-    if (agent->is_absolute_path()) {
+    // First check to see if agent is statically linked into executable
+    if (os::find_builtin_agent(agent, on_load_symbols, num_symbol_entries)) {
+      library = agent->os_lib();
+    } else if (agent->is_absolute_path()) {
       library = os::dll_load(name, ebuf, sizeof ebuf);
       if (library == NULL) {
         const char *sub_msg = " in absolute path, with error: ";
@@ -3738,13 +3741,15 @@
       }
     }
     agent->set_os_lib(library);
+    agent->set_valid();
   }
 
   // Find the OnLoad function.
-  for (size_t symbol_index = 0; symbol_index < num_symbol_entries; symbol_index++) {
-    on_load_entry = CAST_TO_FN_PTR(OnLoadEntry_t, os::dll_lookup(library, on_load_symbols[symbol_index]));
-    if (on_load_entry != NULL) break;
-  }
+  on_load_entry =
+    CAST_TO_FN_PTR(OnLoadEntry_t, os::find_agent_function(agent,
+                                                          false,
+                                                          on_load_symbols,
+                                                          num_symbol_entries));
   return on_load_entry;
 }
 
@@ -3819,22 +3824,23 @@
 void Threads::shutdown_vm_agents() {
   // Send any Agent_OnUnload notifications
   const char *on_unload_symbols[] = AGENT_ONUNLOAD_SYMBOLS;
+  size_t num_symbol_entries = ARRAY_SIZE(on_unload_symbols);
   extern struct JavaVM_ main_vm;
   for (AgentLibrary* agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
 
     // Find the Agent_OnUnload function.
-    for (uint symbol_index = 0; symbol_index < ARRAY_SIZE(on_unload_symbols); symbol_index++) {
-      Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t,
-               os::dll_lookup(agent->os_lib(), on_unload_symbols[symbol_index]));
-
-      // Invoke the Agent_OnUnload function
-      if (unload_entry != NULL) {
-        JavaThread* thread = JavaThread::current();
-        ThreadToNativeFromVM ttn(thread);
-        HandleMark hm(thread);
-        (*unload_entry)(&main_vm);
-        break;
-      }
+    Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t,
+      os::find_agent_function(agent,
+      false,
+      on_unload_symbols,
+      num_symbol_entries));
+
+    // Invoke the Agent_OnUnload function
+    if (unload_entry != NULL) {
+      JavaThread* thread = JavaThread::current();
+      ThreadToNativeFromVM ttn(thread);
+      HandleMark hm(thread);
+      (*unload_entry)(&main_vm);
     }
   }
 }
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -42,8 +42,19 @@
 
 
 // ReservedSpace
+
+// Dummy constructor
+ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
+    _alignment(0), _special(false), _executable(false) {
+}
+
 ReservedSpace::ReservedSpace(size_t size) {
-  initialize(size, 0, false, NULL, 0, false);
+  size_t page_size = os::page_size_for_region(size, size, 1);
+  bool large_pages = page_size != (size_t)os::vm_page_size();
+  // Don't force the alignment to be large page aligned,
+  // since that will waste memory.
+  size_t alignment = os::vm_allocation_granularity();
+  initialize(size, alignment, large_pages, NULL, 0, false);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
@@ -129,16 +140,18 @@
 
   if (special) {
 
-    base = os::reserve_memory_special(size, requested_address, executable);
+    base = os::reserve_memory_special(size, alignment, requested_address, executable);
 
     if (base != NULL) {
       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
         // OS ignored requested address. Try different address.
         return;
       }
-      // Check alignment constraints
+      // Check alignment constraints.
       assert((uintptr_t) base % alignment == 0,
-             "Large pages returned a non-aligned address");
+             err_msg("Large pages returned a non-aligned address, base: "
+                 PTR_FORMAT " alignment: " PTR_FORMAT,
+                 base, (void*)(uintptr_t)alignment));
       _special = true;
     } else {
       // failed; try to reserve regular memory below
@@ -715,4 +728,188 @@
   tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
 }
 
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+  do {\
+    if (VerboseInternalVMTests) { \
+      tty->print_cr(__VA_ARGS__); \
+      tty->flush(); \
+    }\
+  } while (false)
+
+class TestReservedSpace : AllStatic {
+ public:
+  static void small_page_write(void* addr, size_t size) {
+    size_t page_size = os::vm_page_size();
+
+    char* end = (char*)addr + size;
+    for (char* p = (char*)addr; p < end; p += page_size) {
+      *p = 1;
+    }
+  }
+
+  static void release_memory_for_test(ReservedSpace rs) {
+    if (rs.special()) {
+      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
+    } else {
+      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
+    }
+  }
+
+  static void test_reserved_space1(size_t size, size_t alignment) {
+    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
+
+    assert(is_size_aligned(size, alignment), "Incorrect input parameters");
+
+    ReservedSpace rs(size,          // size
+                     alignment,     // alignment
+                     UseLargePages, // large
+                     NULL,          // requested_address
+                     0);            // noaccess_prefix
+
+    test_log(" rs.special() == %d", rs.special());
+
+    assert(rs.base() != NULL, "Must be");
+    assert(rs.size() == size, "Must be");
+
+    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
+    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
+
+    if (rs.special()) {
+      small_page_write(rs.base(), size);
+    }
+
+    release_memory_for_test(rs);
+  }
+
+  static void test_reserved_space2(size_t size) {
+    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
+
+    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+
+    ReservedSpace rs(size);
+
+    test_log(" rs.special() == %d", rs.special());
+
+    assert(rs.base() != NULL, "Must be");
+    assert(rs.size() == size, "Must be");
+
+    if (rs.special()) {
+      small_page_write(rs.base(), size);
+    }
+
+    release_memory_for_test(rs);
+  }
+
+  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
+    test_log("test_reserved_space3(%p, %p, %d)",
+        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
+
+    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
+
+    bool large = maybe_large && UseLargePages && size >= os::large_page_size();
+
+    ReservedSpace rs(size, alignment, large, false);
+
+    test_log(" rs.special() == %d", rs.special());
+
+    assert(rs.base() != NULL, "Must be");
+    assert(rs.size() == size, "Must be");
+
+    if (rs.special()) {
+      small_page_write(rs.base(), size);
+    }
+
+    release_memory_for_test(rs);
+  }
+
+
+  static void test_reserved_space1() {
+    size_t size = 2 * 1024 * 1024;
+    size_t ag   = os::vm_allocation_granularity();
+
+    test_reserved_space1(size,      ag);
+    test_reserved_space1(size * 2,  ag);
+    test_reserved_space1(size * 10, ag);
+  }
+
+  static void test_reserved_space2() {
+    size_t size = 2 * 1024 * 1024;
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserved_space2(size * 1);
+    test_reserved_space2(size * 2);
+    test_reserved_space2(size * 10);
+    test_reserved_space2(ag);
+    test_reserved_space2(size - ag);
+    test_reserved_space2(size);
+    test_reserved_space2(size + ag);
+    test_reserved_space2(size * 2);
+    test_reserved_space2(size * 2 - ag);
+    test_reserved_space2(size * 2 + ag);
+    test_reserved_space2(size * 3);
+    test_reserved_space2(size * 3 - ag);
+    test_reserved_space2(size * 3 + ag);
+    test_reserved_space2(size * 10);
+    test_reserved_space2(size * 10 + size / 2);
+  }
+
+  static void test_reserved_space3() {
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserved_space3(ag,      ag    , false);
+    test_reserved_space3(ag * 2,  ag    , false);
+    test_reserved_space3(ag * 3,  ag    , false);
+    test_reserved_space3(ag * 2,  ag * 2, false);
+    test_reserved_space3(ag * 4,  ag * 2, false);
+    test_reserved_space3(ag * 8,  ag * 2, false);
+    test_reserved_space3(ag * 4,  ag * 4, false);
+    test_reserved_space3(ag * 8,  ag * 4, false);
+    test_reserved_space3(ag * 16, ag * 4, false);
+
+    if (UseLargePages) {
+      size_t lp = os::large_page_size();
+
+      // Without large pages
+      test_reserved_space3(lp,     ag * 4, false);
+      test_reserved_space3(lp * 2, ag * 4, false);
+      test_reserved_space3(lp * 4, ag * 4, false);
+      test_reserved_space3(lp,     lp    , false);
+      test_reserved_space3(lp * 2, lp    , false);
+      test_reserved_space3(lp * 3, lp    , false);
+      test_reserved_space3(lp * 2, lp * 2, false);
+      test_reserved_space3(lp * 4, lp * 2, false);
+      test_reserved_space3(lp * 8, lp * 2, false);
+
+      // With large pages
+      test_reserved_space3(lp, ag * 4    , true);
+      test_reserved_space3(lp * 2, ag * 4, true);
+      test_reserved_space3(lp * 4, ag * 4, true);
+      test_reserved_space3(lp, lp        , true);
+      test_reserved_space3(lp * 2, lp    , true);
+      test_reserved_space3(lp * 3, lp    , true);
+      test_reserved_space3(lp * 2, lp * 2, true);
+      test_reserved_space3(lp * 4, lp * 2, true);
+      test_reserved_space3(lp * 8, lp * 2, true);
+    }
+  }
+
+  static void test_reserved_space() {
+    test_reserved_space1();
+    test_reserved_space2();
+    test_reserved_space3();
+  }
+};
+
+void TestReservedSpace_test() {
+  TestReservedSpace::test_reserved_space();
+}
+
+#endif // PRODUCT
+
 #endif
--- a/hotspot/src/share/vm/runtime/virtualspace.hpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/virtualspace.hpp	Fri Aug 30 00:19:42 2013 -0700
@@ -53,6 +53,7 @@
 
  public:
   // Constructor
+  ReservedSpace();
   ReservedSpace(size_t size);
   ReservedSpace(size_t size, size_t alignment, bool large,
                 char* requested_address = NULL,
--- a/hotspot/src/share/vm/services/management.cpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/services/management.cpp	Fri Aug 30 00:19:42 2013 -0700
@@ -876,8 +876,6 @@
       total_used += u.used();
       total_committed += u.committed();
 
-      // if any one of the memory pool has undefined init_size or max_size,
-      // set it to -1
       if (u.init_size() == (size_t)-1) {
         has_undefined_init_size = true;
       }
@@ -894,6 +892,15 @@
     }
   }
 
+  // if any one of the memory pools has an undefined init_size or max_size,
+  // set it to -1
+  if (has_undefined_init_size) {
+    total_init = (size_t)-1;
+  }
+  if (has_undefined_max_size) {
+    total_max = (size_t)-1;
+  }
+
   MemoryUsage usage((heap ? InitialHeapSize : total_init),
                     total_used,
                     total_committed,
--- a/hotspot/src/share/vm/services/memTracker.hpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/services/memTracker.hpp	Fri Aug 30 00:19:42 2013 -0700
@@ -87,6 +87,8 @@
         MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
    static inline void record_virtual_memory_commit(address addr, size_t size,
         address pc = 0, Thread* thread = NULL) { }
+   static inline void record_virtual_memory_release(address addr, size_t size,
+        Thread* thread = NULL) { }
    static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
         Thread* thread = NULL) { }
    static inline Tracker get_realloc_tracker() { return _tkr; }
@@ -372,6 +374,13 @@
     tkr.record(addr, size, flags, pc);
   }
 
+  static inline void record_virtual_memory_release(address addr, size_t size,
+      Thread* thread = NULL) {
+    if (is_on()) {
+      Tracker tkr(Tracker::Release, thread);
+      tkr.record(addr, size);
+    }
+  }
 
   // record memory type on virtual memory base address
   static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
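
The new record_virtual_memory_release() hook gives a caller a single call with which to report an unmapped region to NMT. A minimal, hypothetical sketch follows; the unmap step is a placeholder and release_region() is not a HotSpot function, only the MemTracker call comes from this hunk.

  // Hypothetical caller, for illustration only.
  static void release_region(address addr, size_t bytes) {
    // ... platform-specific unmap of [addr, addr + bytes) happens here ...
    // Report the release so NMT drops the reserved region from its records.
    MemTracker::record_virtual_memory_release(addr, bytes);
  }
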
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Fri Aug 30 00:19:42 2013 -0700
@@ -402,6 +402,14 @@
 
 #define align_size_up_(size, alignment) (((size) + ((alignment) - 1)) & ~((alignment) - 1))
 
+inline bool is_size_aligned(size_t size, size_t alignment) {
+  return align_size_up_(size, alignment) == size;
+}
+
+inline bool is_ptr_aligned(void* ptr, size_t alignment) {
+  return align_size_up_((intptr_t)ptr, (intptr_t)alignment) == (intptr_t)ptr;
+}
+
 inline intptr_t align_size_up(intptr_t size, intptr_t alignment) {
   return align_size_up_(size, alignment);
 }
@@ -414,6 +422,14 @@
 
 #define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))
 
+inline void* align_ptr_up(void* ptr, size_t alignment) {
+  return (void*)align_size_up((intptr_t)ptr, (intptr_t)alignment);
+}
+
+inline void* align_ptr_down(void* ptr, size_t alignment) {
+  return (void*)align_size_down((intptr_t)ptr, (intptr_t)alignment);
+}
+
 // Align objects by rounding up their size, in HeapWord units.
 
 #define align_object_size_(size) align_size_up_(size, MinObjAlignment)
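
The new helpers wrap the existing align_size_up_ / align_size_down machinery for the common "is this size or pointer aligned" and "round this pointer" questions. Below is a standalone sketch of their intended behavior; it mirrors the helpers outside HotSpot and, as in the VM, assumes the alignment is a power of two.

  // Standalone illustration only; mirrors the new helpers with the same
  // bit trick. `alignment` must be a power of two.
  #include <cassert>
  #include <cstdint>
  #include <cstddef>

  static size_t align_size_up(size_t size, size_t alignment) {
    return (size + (alignment - 1)) & ~(alignment - 1);
  }

  static bool is_size_aligned(size_t size, size_t alignment) {
    return align_size_up(size, alignment) == size;
  }

  static void* align_ptr_up(void* ptr, size_t alignment) {
    return (void*)(uintptr_t)align_size_up((uintptr_t)ptr, alignment);
  }

  int main() {
    assert(is_size_aligned(8192, 4096));    // already a multiple of 4K
    assert(!is_size_aligned(8193, 4096));   // one byte past a 4K boundary

    char buf[64];
    void* p = align_ptr_up(buf + 1, 16);    // round up to the next 16-byte boundary
    assert(((uintptr_t)p & 15u) == 0);
    assert((char*)p >= buf + 1 && (char*)p <= buf + 16);
    return 0;
  }
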
--- a/hotspot/test/TEST.ROOT	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/test/TEST.ROOT	Fri Aug 30 00:19:42 2013 -0700
@@ -25,7 +25,8 @@
 
 # This file identifies the root of the test-suite hierarchy.
 # It also contains test-suite configuration information.
-# DO NOT EDIT without first contacting hotspot-regtest@sun.com
 
 # The list of keywords supported in this test suite
 keys=cte_test jcmd nmt regression gc
+
+groups=TEST.groups [closed/TEST.groups]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/TEST.groups	Fri Aug 30 00:19:42 2013 -0700
@@ -0,0 +1,192 @@
+#
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+# Profile-based Test Group Definitions
+#
+# These groups define the tests that cover the different possible runtimes:
+# - compact1, compact2, compact3, full JRE, JDK
+#
+# In addition they support testing of the minimal VM on compact1 and compact2.
+# Essentially this defines groups based around the specified APIs and VM
+# services available in the runtime.
+#
+# The groups are defined hierarchically in two forms:
+# - The needs_xxx groups list all the tests that have a dependency on
+# a specific profile, either because the test exercises a feature in
+# that profile or because the test infrastructure relies on a feature
+# in that profile.
+# - The primary groups are defined in terms of the other primary groups
+# combined with the needs_xxx groups (including and excluding them as
+# appropriate). For example, the jre group runs all tests from compact3, plus
+# those from needs_jre, but excludes those from needs_jdk.
+#
+# The bottom group defines all the actual tests to be considered, simply
+# by listing the top-level test directories.
+#
+# To use a group, simply list it on the jtreg command line, e.g.:
+#   jtreg :jdk
+# runs all tests, while
+#   jtreg :compact2
+# runs only those tests that require no more than the compact1 and compact2 APIs.
+#
+
+# Full JDK can run all tests
+#
+jdk = \
+  :jre \
+  :needs_jdk
+
+# Tests that require a full JDK to execute. Either they test a feature
+# only in the JDK or they use tools that are only in the JDK. The latter
+# can be resolved in some cases by using tools from the compile-jdk.
+#
+needs_jdk = \
+  gc/TestG1ZeroPGCTJcmdThreadPrint.java \
+  gc/metaspace/ClassMetaspaceSizeInJmapHeap.java \
+  gc/metaspace/TestMetaspacePerfCounters.java \
+  runtime/6819213/TestBootNativeLibraryPath.java \
+  runtime/6878713/Test6878713.sh \
+  runtime/6925573/SortMethodsTest.java \
+  runtime/7107135/Test7107135.sh \
+  runtime/7158988/FieldMonitor.java \
+  runtime/7194254/Test7194254.java \
+  runtime/jsig/Test8017498.sh \
+  runtime/Metaspace/FragmentMetaspace.java \
+  runtime/NMT/BaselineWithParameter.java \
+  runtime/NMT/JcmdScale.java \
+  runtime/NMT/JcmdWithNMTDisabled.java \
+  runtime/NMT/MallocTestType.java \
+  runtime/NMT/ReleaseCommittedMemory.java \
+  runtime/NMT/ShutdownTwice.java \
+  runtime/NMT/SummaryAfterShutdown.java \
+  runtime/NMT/SummarySanityCheck.java \
+  runtime/NMT/ThreadedMallocTestType.java \
+  runtime/NMT/ThreadedVirtualAllocTestType.java \
+  runtime/NMT/VirtualAllocTestType.java \
+  runtime/RedefineObject/TestRedefineObject.java \
+  serviceability/attach/AttachWithStalePidFile.java
+
+# JRE adds further tests to compact3
+#
+jre = \
+  :compact3 \
+  :needs_jre \
+ -:needs_jdk
+
+# Tests that require the full JRE
+#
+needs_jre = \
+  compiler/6852078/Test6852078.java \
+  compiler/7047069/Test7047069.java \
+  runtime/6294277/SourceDebugExtension.java
+
+# Compact 3 adds further tests to compact2
+#
+compact3 = \
+  :compact2 \
+  :needs_compact3 \
+ -:needs_jre \
+ -:needs_jdk
+
+
+# Tests that require compact3 APIs
+#
+needs_compact3 = \
+  compiler/whitebox/DeoptimizeMethodTest.java \
+  compiler/whitebox/SetForceInlineMethodTest.java \
+  compiler/whitebox/SetDontInlineMethodTest.java \
+  compiler/whitebox/DeoptimizeAllTest.java \
+  compiler/whitebox/MakeMethodNotCompilableTest.java \
+  compiler/whitebox/ClearMethodStateTest.java \
+  compiler/whitebox/EnqueueMethodForCompilationTest.java \
+  compiler/whitebox/IsMethodCompilableTest.java \
+  gc/6581734/Test6581734.java \
+  gc/7072527/TestFullGCCount.java \
+  gc/7168848/HumongousAlloc.java \
+  gc/arguments/TestG1HeapRegionSize.java \
+  gc/metaspace/TestMetaspaceMemoryPool.java \
+  runtime/InternalApi/ThreadCpuTimesDeadlock.java \
+  serviceability/threads/TestFalseDeadLock.java
+
+# Compact 2 adds full VM tests
+compact2 = \
+  :compact2_minimal \
+  :compact1 \
+  :needs_full_vm_compact2 \
+ -:needs_compact3 \
+ -:needs_jre \
+ -:needs_jdk
+
+# Tests that require compact2 APIs and a full VM
+#
+needs_full_vm_compact2 =
+
+# Compact 1 adds full VM tests
+#
+compact1 = \
+  :compact1_minimal \
+  :needs_full_vm_compact1 \
+ -:needs_compact2 \
+ -:needs_full_vm_compact2 \
+ -:needs_compact3 \
+ -:needs_jre \
+ -:needs_jdk
+
+# Tests that require compact1 APIs and a full VM
+#
+needs_full_vm_compact1 = \
+  runtime/NMT \
+  gc/g1/TestRegionAlignment.java \
+  gc/g1/TestShrinkToOneRegion.java \
+  gc/metaspace/G1AddMetaspaceDependency.java \
+  runtime/6929067/Test6929067.sh
+
+# Minimal VM on Compact 2 adds in some compact2 tests
+#
+compact2_minimal = \
+  :compact1_minimal \
+  :needs_compact2 \
+ -:needs_full_vm_compact2 \
+ -:needs_compact3 \
+ -:needs_jre \
+ -:needs_jdk
+
+# Tests that require compact2 APIs
+#
+needs_compact2 = \
+  compiler/6589834/Test_ia32.java
+
+# All tests that run on the most minimal configuration: Minimal VM on Compact 1
+compact1_minimal = \
+  serviceability/ \
+  compiler/ \
+  testlibrary/ \
+  sanity/ \
+  runtime/ \
+  gc/ \
+ -:needs_full_vm_compact1 \
+ -:needs_full_vm_compact2 \
+ -:needs_compact2 \
+ -:needs_compact3 \
+ -:needs_jre \
+ -:needs_jdk
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/8004051/Test8004051.java	Fri Aug 30 00:19:42 2013 -0700
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8004051
+ * @bug 8005722
+ * @summary assert(_oprs_len[mode] < maxNumberOfOperands) failed: array overflow
+ *
+ * @run main/othervm -Xcomp -client Test8004051
+ */
+
+public class Test8004051 {
+    public static void main(String[] argv) {
+        Object o = new Object();
+        fillPrimRect(1.1f, 1.2f, 1.3f, 1.4f,
+                     o, o,
+                     1.5f, 1.6f, 1.7f, 1.8f,
+                     2.0f, 2.1f, 2.2f, 2.3f,
+                     2.4f, 2.5f, 2.6f, 2.7f,
+                     100, 101);
+        System.out.println("Test passed, test did not assert");
+    }
+
+    static boolean fillPrimRect(float x, float y, float w, float h,
+                                Object rectTex, Object wrapTex,
+                                float bx, float by, float bw, float bh,
+                                float f1, float f2, float f3, float f4,
+                                float f5, float f6, float f7, float f8,
+                                int i1, int i2 ) {
+        System.out.println(x + " " + y + " " + w + " " + h + " " +
+                           bx + " " + by + " " + bw + " " + bh);
+        return true;
+    }
+}
--- a/hotspot/test/runtime/7051189/Xchecksig.sh	Thu Aug 29 09:41:51 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-# 
-#  Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
-#  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-# 
-#  This code is free software; you can redistribute it and/or modify it
-#  under the terms of the GNU General Public License version 2 only, as
-#  published by the Free Software Foundation.
-# 
-#  This code is distributed in the hope that it will be useful, but WITHOUT
-#  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-#  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-#  version 2 for more details (a copy is included in the LICENSE file that
-#  accompanied this code).
-# 
-#  You should have received a copy of the GNU General Public License version
-#  2 along with this work; if not, write to the Free Software Foundation,
-#  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-# 
-#  Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-#  or visit www.oracle.com if you need additional information or have any
-#  questions.
-# 
-
- 
-# @test Xchecksig.sh
-# @bug 7051189
-# @summary Need to suppress info message if -xcheck:jni used with libjsig.so
-# @run shell Xchecksig.sh
-#
-
-if [ "${TESTSRC}" = "" ]
-then
-  TESTSRC=${PWD}
-  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
-fi
-echo "TESTSRC=${TESTSRC}"
-## Adding common setup Variables for running shell tests.
-. ${TESTSRC}/../../test_env.sh
-
-OS=`uname -s`
-case "$OS" in
-  Windows_* | CYGWIN_* )
-    printf "Not testing libjsig.so on Windows. PASSED.\n "
-    exit 0
-    ;;
-esac
-
-JAVA=${TESTJAVA}${FS}bin${FS}java
-
-# LD_PRELOAD arch needs to match the binary we run, so run the java
-# 64-bit binary directly if we are testing 64-bit (bin/ARCH/java).
-# Check if TESTVMOPS contains -d64, but cannot use 
-# java ${TESTVMOPS} to run "java -d64"  with LD_PRELOAD.
-
-if [ ${OS} -eq "SunOS" ]
-then
-  printf  "SunOS test TESTVMOPTS = ${TESTVMOPTS}"
-  printf ${TESTVMOPTS} | grep d64 > /dev/null
-  if [ $? -eq 0 ]
-  then
-    printf "SunOS 64-bit test\n"
-    BIT_FLAG=-d64
-  fi
-fi
-
-ARCH=`uname -p`
-case $ARCH in
-  i386)
-    if [ X${BIT_FLAG} != "X" ]
-    then
-      ARCH=amd64
-      JAVA=${TESTJAVA}${FS}bin${FS}${ARCH}${FS}java
-    fi
-    ;;
-  sparc)
-    if [ X${BIT_FLAG} != "X" ]
-    then
-      ARCH=sparcv9
-      JAVA=${TESTJAVA}${FS}bin${FS}${ARCH}${FS}java
-    fi
-    ;;
-  * )
-    printf "Not testing architecture $ARCH, skipping test.\n"
-    exit 0
-  ;; 
-esac
-
-LIBJSIG=${COMPILEJAVA}${FS}jre${FS}lib${FS}${ARCH}${FS}libjsig.so
-
-# If libjsig and binary do not match, skip test.
-
-A=`file ${LIBJSIG} | awk '{ print $3 }'`
-B=`file ${JAVA}    | awk '{ print $3 }'`
-
-if [ $A -ne $B ]
-then
-  printf "Mismatching binary and library to preload, skipping test.\n"
-  exit 0
-fi
-
-if [ ! -f ${LIBJSIG} ]
-then
-  printf "Skipping test: libjsig missing for given architecture: ${LIBJSIG}\n"
-  exit 0
-fi
-# Use java -version to test, java version info appears on stderr,
-# the libjsig message we are removing appears on stdout.
-
-# grep returns zero meaning found, non-zero means not found:
-
-LD_PRELOAD=${LIBJSIG} ${JAVA} ${TESTVMOPTS} -Xcheck:jni -version 2>&1  | grep "libjsig is activated"
-if [ $? -eq 0 ]; then
-  printf "Failed: -Xcheck:jni prints message when libjsig.so is loaded.\n"
-  exit 1
-fi
-
-
-LD_PRELOAD=${LIBJSIG} ${JAVA} ${TESTVMOPTS} -Xcheck:jni -verbose:jni -version 2>&1 | grep "libjsig is activated"
-if [ $? != 0 ]; then
-  printf "Failed: -Xcheck:jni does not print message when libjsig.so is loaded and -verbose:jni is set.\n"
-  exit 1
-fi
-
-printf "PASSED\n"
-exit 0
-
--- a/hotspot/test/runtime/NMT/ThreadedVirtualAllocTestType.java	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/test/runtime/NMT/ThreadedVirtualAllocTestType.java	Fri Aug 30 00:19:42 2013 -0700
@@ -45,6 +45,13 @@
     String pid = Integer.toString(ProcessTools.getProcessId());
     ProcessBuilder pb = new ProcessBuilder();
 
+    boolean has_nmt_detail = wb.NMTIsDetailSupported();
+    if (has_nmt_detail) {
+      System.out.println("NMT detail support detected.");
+    } else {
+      System.out.println("NMT detail support not detected.");
+    }
+
     Thread reserveThread = new Thread() {
       public void run() {
         addr = wb.NMTReserveMemory(reserveSize);
@@ -58,7 +65,9 @@
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"});
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=512KB, committed=0KB)");
-    output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved 512KB for Test");
+    if (has_nmt_detail) {
+      output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved 512KB for Test");
+    }
 
     Thread commitThread = new Thread() {
       public void run() {
@@ -72,7 +81,9 @@
 
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=512KB, committed=128KB)");
-    output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed 128KB");
+    if (has_nmt_detail) {
+      output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed 128KB");
+    }
 
     Thread uncommitThread = new Thread() {
       public void run() {
--- a/hotspot/test/runtime/NMT/VirtualAllocTestType.java	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/test/runtime/NMT/VirtualAllocTestType.java	Fri Aug 30 00:19:42 2013 -0700
@@ -46,13 +46,22 @@
     String pid = Integer.toString(ProcessTools.getProcessId());
     ProcessBuilder pb = new ProcessBuilder();
 
+    boolean has_nmt_detail = wb.NMTIsDetailSupported();
+    if (has_nmt_detail) {
+      System.out.println("NMT detail support detected.");
+    } else {
+      System.out.println("NMT detail support not detected.");
+    }
+
     addr = wb.NMTReserveMemory(reserveSize);
     mergeData();
+    pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"});
 
-    pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"});
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=256KB, committed=0KB)");
-    output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved 256KB for Test");
+    if (has_nmt_detail) {
+      output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved 256KB for Test");
+    }
 
     wb.NMTCommitMemory(addr, commitSize);
 
@@ -60,7 +69,9 @@
 
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=256KB, committed=128KB)");
-    output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed 128KB");
+    if (has_nmt_detail) {
+      output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed 128KB");
+    }
 
     wb.NMTUncommitMemory(addr, commitSize);
 
@@ -71,7 +82,6 @@
     output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed");
 
     wb.NMTReleaseMemory(addr, reserveSize);
-
     mergeData();
 
     output = new OutputAnalyzer(pb.start());
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/XCheckJniJsig/XCheckJSig.java	Fri Aug 30 00:19:42 2013 -0700
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7051189 8023393
+ * @summary Need to suppress info message if -Xcheck:jni is used with libjsig.so
+ * @library /testlibrary
+ * @run main XCheckJSig
+ */
+
+import java.util.*;
+import com.oracle.java.testlibrary.*;
+
+public class XCheckJSig {
+    public static void main(String args[]) throws Throwable {
+
+        System.out.println("Regression test for bugs 7051189 and 8023393");
+        if (!Platform.isSolaris() && !Platform.isLinux() && !Platform.isOSX()) {
+            System.out.println("Test only applicable on Solaris, Linux, and Mac OSX, skipping");
+            return;
+        }
+
+        String jdk_path = System.getProperty("test.jdk");
+        String os_arch = Platform.getOsArch();
+        String libjsig;
+        String env_var;
+        if (Platform.isOSX()) {
+            libjsig = jdk_path + "/jre/lib/server/libjsig.dylib";
+            env_var = "DYLD_INSERT_LIBRARIES";
+        } else {
+            libjsig = jdk_path + "/jre/lib/" + os_arch + "/libjsig.so";
+            env_var = "LD_PRELOAD";
+        }
+        String java_program;
+        if (Platform.isSolaris()) {
+            // On Solaris, need to call the 64-bit Java directly in order for
+            // LD_PRELOAD to work because libjsig.so is 64-bit.
+            java_program = jdk_path + "/jre/bin/" + os_arch + "/java";
+        } else {
+            java_program = JDKToolFinder.getJDKTool("java");
+        }
+        // If this test fails, these might be useful to know.
+        System.out.println("libjsig: " + libjsig);
+        System.out.println("osArch: " + os_arch);
+        System.out.println("java_program: " + java_program);
+
+        ProcessBuilder pb = new ProcessBuilder(java_program, "-Xcheck:jni", "-version");
+        Map<String, String> env = pb.environment();
+        env.put(env_var, libjsig);
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("libjsig is activated");
+        output.shouldHaveExitValue(0);
+
+        pb = new ProcessBuilder(java_program, "-Xcheck:jni", "-verbose:jni", "-version");
+        env = pb.environment();
+        env.put(env_var, libjsig);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("libjsig is activated");
+        output.shouldHaveExitValue(0);
+    }
+}
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Thu Aug 29 09:41:51 2013 -0700
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Aug 30 00:19:42 2013 -0700
@@ -90,6 +90,7 @@
   public native void NMTUncommitMemory(long addr, long size);
   public native void NMTReleaseMemory(long addr, long size);
   public native boolean NMTWaitForDataMerge();
+  public native boolean NMTIsDetailSupported();
 
   // Compiler
   public native void    deoptimizeAll();