changeset:   9450:80b5a03ca4c1
author:      never
date:        Fri, 06 May 2011 11:36:25 -0700
parent:      9449:b2c921e1f46d (current diff)
parent:      9433:474a6834bc11 (diff)
child:       9451:b7d21c2e7c7a
child:       9631:9782bd698ffe
description: Merge
files:       hotspot/src/share/vm/runtime/arguments.cpp
--- a/hotspot/agent/src/os/solaris/proc/libproc.h	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/agent/src/os/solaris/proc/libproc.h	Fri May 06 11:36:25 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -420,7 +420,22 @@
 /*
  * Stack frame iteration interface.
  */
+#ifdef SOLARIS_11_B159_OR_LATER
+/* building on Nevada-B159 or later so define the new callback */
+typedef int proc_stack_f(
+    void *,             /* the cookie given to Pstack_iter() */
+    const prgregset_t,  /* the frame's registers */
+    uint_t,             /* argc for the frame's function */
+    const long *,       /* argv for the frame's function */
+    int,                /* bitwise flags describing the frame (see below) */
+    int);               /* a signal number */
+
+#define PR_SIGNAL_FRAME    1    /* called by a signal handler */
+#define PR_FOUND_SIGNAL    2    /* we found the corresponding signal number */
+#else
+/* building on Nevada-B158 or earlier so define the old callback */
 typedef int proc_stack_f(void *, const prgregset_t, uint_t, const long *);
+#endif
 
 extern int Pstack_iter(struct ps_prochandle *,
     const prgregset_t, proc_stack_f *, void *);
--- a/hotspot/agent/src/os/solaris/proc/salibproc.h	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/agent/src/os/solaris/proc/salibproc.h	Fri May 06 11:36:25 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,7 +101,23 @@
 /*
  * Stack frame iteration interface.
  */
+#ifdef SOLARIS_11_B159_OR_LATER
+/* building on Nevada-B159 or later so define the new callback */
+typedef int proc_stack_f(
+    void *,             /* the cookie given to Pstack_iter() */
+    const prgregset_t,  /* the frame's registers */
+    uint_t,             /* argc for the frame's function */
+    const long *,       /* argv for the frame's function */
+    int,                /* bitwise flags describing the frame (see below) */
+    int);               /* a signal number */
+
+#define PR_SIGNAL_FRAME    1    /* called by a signal handler */
+#define PR_FOUND_SIGNAL    2    /* we found the corresponding signal number */
+#else
+/* building on Nevada-B158 or earlier so define the old callback */
 typedef int proc_stack_f(void *, const prgregset_t, uint_t, const long *);
+#endif
+
 extern int Pstack_iter(struct ps_prochandle *,
     const prgregset_t, proc_stack_f *, void *);
 
--- a/hotspot/agent/src/os/solaris/proc/saproc.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/agent/src/os/solaris/proc/saproc.cpp	Fri May 06 11:36:25 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,9 @@
 
 #include "salibproc.h"
 #include "sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal.h"
+#ifndef SOLARIS_11_B159_OR_LATER
+#include <sys/utsname.h>
+#endif
 #include <thread_db.h>
 #include <strings.h>
 #include <limits.h>
@@ -40,8 +43,22 @@
 #define SYMBOL_BUF_SIZE  256
 #define ERR_MSG_SIZE     (PATH_MAX + 256)
 
-// debug mode
+// debug modes
 static int _libsaproc_debug = 0;
+#ifndef SOLARIS_11_B159_OR_LATER
+static bool _Pstack_iter_debug = false;
+
+static void dprintf_2(const char* format,...) {
+  if (_Pstack_iter_debug) {
+    va_list alist;
+
+    va_start(alist, format);
+    fputs("Pstack_iter DEBUG: ", stderr);
+    vfprintf(stderr, format, alist);
+    va_end(alist);
+  }
+}
+#endif // !SOLARIS_11_B159_OR_LATER
 
 static void print_debug(const char* format,...) {
   if (_libsaproc_debug) {
@@ -450,6 +467,7 @@
   return 0;
 }
 
+// Pstack_iter() proc_stack_f callback prior to Nevada-B159
 static int
 fill_cframe_list(void *cd, const prgregset_t regs, uint_t argc, const long *argv) {
   DebuggerWith2Objects* dbgo2 = (DebuggerWith2Objects*) cd;
@@ -472,6 +490,14 @@
   return 0;
 }
 
+// Pstack_iter() proc_stack_f callback in Nevada-B159 or later
+/*ARGSUSED*/
+static int
+wrapper_fill_cframe_list(void *cd, const prgregset_t regs, uint_t argc,
+                         const long *argv, int frame_flags, int sig) {
+  return(fill_cframe_list(cd, regs, argc, argv));
+}
+
 // part of the class sharing workaround
 
 // FIXME: !!HACK ALERT!!
@@ -970,6 +996,11 @@
                    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
 }
 
+#ifndef SOLARIS_11_B159_OR_LATER
+// building on Nevada-B158 or earlier so more hoops to jump through
+static bool has_newer_Pstack_iter = false;  // older version by default
+#endif
+
 /*
  * Class:       sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
  * Method:      fillCFrameList0
@@ -997,7 +1028,24 @@
 
   env->ReleaseLongArrayElements(regsArray, ptr, JNI_ABORT);
   CHECK_EXCEPTION_(0);
-  Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs, fill_cframe_list, &dbgo2);
+
+#ifdef SOLARIS_11_B159_OR_LATER
+  // building on Nevada-B159 or later so use the new callback
+  Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
+              wrapper_fill_cframe_list, &dbgo2);
+#else
+  // building on Nevada-B158 or earlier so figure out which callback to use
+
+  if (has_newer_Pstack_iter) {
+    // Since we're building on Nevada-B158 or earlier, we have to
+    // cast wrapper_fill_cframe_list to make the compiler happy.
+    Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
+                (proc_stack_f *)wrapper_fill_cframe_list, &dbgo2);
+  } else {
+    Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
+                fill_cframe_list, &dbgo2);
+  }
+#endif // SOLARIS_11_B159_OR_LATER
   return dbgo2.obj;
 }
 
@@ -1218,6 +1266,102 @@
   return res;
 }
 
+#ifndef SOLARIS_11_B159_OR_LATER
+// Determine if the OS we're running on has the newer version
+// of libproc's Pstack_iter.
+//
+// Set env var PSTACK_ITER_DEBUG=true to debug this logic.
+// Set env var PSTACK_ITER_DEBUG_RELEASE to simulate a 'release' value.
+// Set env var PSTACK_ITER_DEBUG_VERSION to simulate a 'version' value.
+//
+// frankenputer 'uname -r -v': 5.10 Generic_141445-09
+// jurassic 'uname -r -v':     5.11 snv_164
+// lonepeak 'uname -r -v':     5.11 snv_127
+//
+static void set_has_newer_Pstack_iter(JNIEnv *env) {
+  static bool done_set = false;
+
+  if (done_set) {
+    // already set has_newer_Pstack_iter
+    return;
+  }
+
+  struct utsname name;
+  if (uname(&name) == -1) {
+    THROW_NEW_DEBUGGER_EXCEPTION("uname() failed!");
+  }
+  dprintf_2("release='%s'  version='%s'\n", name.release, name.version);
+
+  if (_Pstack_iter_debug) {
+    char *override = getenv("PSTACK_ITER_DEBUG_RELEASE");
+    if (override != NULL) {
+      strncpy(name.release, override, SYS_NMLN - 1);
+      name.release[SYS_NMLN - 2] = '\0';
+      dprintf_2("overriding with release='%s'\n", name.release);
+    }
+    override = getenv("PSTACK_ITER_DEBUG_VERSION");
+    if (override != NULL) {
+      strncpy(name.version, override, SYS_NMLN - 1);
+      name.version[SYS_NMLN - 2] = '\0';
+      dprintf_2("overriding with version='%s'\n", name.version);
+    }
+  }
+
+  // the major number corresponds to the old SunOS major number
+  int major = atoi(name.release);
+  if (major >= 6) {
+    dprintf_2("release is SunOS 6 or later\n");
+    has_newer_Pstack_iter = true;
+    done_set = true;
+    return;
+  }
+  if (major < 5) {
+    dprintf_2("release is SunOS 4 or earlier\n");
+    done_set = true;
+    return;
+  }
+
+  // some SunOS 5.* build so now check for Solaris versions
+  char *dot = strchr(name.release, '.');
+  int minor = 0;
+  if (dot != NULL) {
+    // release is major.minor format
+    *dot = NULL;
+    minor = atoi(dot + 1);
+  }
+
+  if (minor <= 10) {
+    dprintf_2("release is Solaris 10 or earlier\n");
+    done_set = true;
+    return;
+  } else if (minor >= 12) {
+    dprintf_2("release is Solaris 12 or later\n");
+    has_newer_Pstack_iter = true;
+    done_set = true;
+    return;
+  }
+
+  // some Solaris 11 build so now check for internal build numbers
+  if (strncmp(name.version, "snv_", 4) != 0) {
+    dprintf_2("release is Solaris 11 post-GA or later\n");
+    has_newer_Pstack_iter = true;
+    done_set = true;
+    return;
+  }
+
+  // version begins with "snv_" so a pre-GA build of Solaris 11
+  int build = atoi(&name.version[4]);
+  if (build >= 159) {
+    dprintf_2("release is Nevada-B159 or later\n");
+    has_newer_Pstack_iter = true;
+  } else {
+    dprintf_2("release is Nevada-B158 or earlier\n");
+  }
+
+  done_set = true;
+}
+#endif // !SOLARIS_11_B159_OR_LATER
+
 /*
  * Class:       sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
  * Method:      initIDs
@@ -1237,6 +1381,14 @@
   if (libproc_handle == 0)
      THROW_NEW_DEBUGGER_EXCEPTION("can't load libproc.so, if you are using Solaris 5.7 or below, copy libproc.so from 5.8!");
 
+#ifndef SOLARIS_11_B159_OR_LATER
+  _Pstack_iter_debug = getenv("PSTACK_ITER_DEBUG") != NULL;
+
+  set_has_newer_Pstack_iter(env);
+  CHECK_EXCEPTION;
+  dprintf_2("has_newer_Pstack_iter=%d\n", has_newer_Pstack_iter);
+#endif
+
   p_ps_prochandle_ID = env->GetFieldID(clazz, "p_ps_prochandle", "J");
   CHECK_EXCEPTION;
 
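A minimal stand-alone sketch of the adapter pattern used in the saproc.cpp change above, with hypothetical types so it compiles outside libproc (not part of this changeset): the frame walker is written once against the old four-argument proc_stack_f shape, and the Nevada-B159 six-argument shape simply forwards to it, so the same logic serves both ABIs.

    // Hypothetical stand-ins; the real code uses prgregset_t and uint_t from libproc.
    typedef unsigned int  uint_sketch_t;
    typedef unsigned long regset_sketch_t[38];

    // The walker is written once against the old four-argument signature...
    static int walk_frame(void* cookie, const regset_sketch_t regs,
                          uint_sketch_t argc, const long* argv) {
      // ... record one stack frame for the Java-side debugger ...
      return 0;  // 0 means "keep iterating", as with the real callback
    }

    // ...and the new six-argument signature just forwards, ignoring the extra
    // frame flags and signal number.
    static int walk_frame_b159(void* cookie, const regset_sketch_t regs,
                               uint_sketch_t argc, const long* argv,
                               int frame_flags, int sig) {
      return walk_frame(cookie, regs, argc, argv);
    }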
--- a/hotspot/make/altsrc.make	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/make/altsrc.make	Fri May 06 11:36:25 2011 -0700
@@ -24,7 +24,8 @@
 
 # This file defines variables and macros which are used in the makefiles to 
 # allow distributions to augment or replace common hotspot code with 
-# distribution-specific source files.
+# distribution-specific source files. This capability is disabled when
+# an OPENJDK build is requested, unless HS_ALT_SRC_REL has been set externally.
 
 # Requires: GAMMADIR
 # Provides:
@@ -33,14 +34,17 @@
 
 HS_COMMON_SRC_REL=src
 
-# This needs to be changed to a more generic location, but we keep it as this 
-# for now for compatibility
-HS_ALT_SRC_REL=src/closed
+ifneq ($(OPENJDK),true)
+  # This needs to be changed to a more generic location, but we keep it 
+  # as this for now for compatibility
+  HS_ALT_SRC_REL=src/closed
+else
+  HS_ALT_SRC_REL=NO_SUCH_PATH
+endif
 
 HS_COMMON_SRC=$(GAMMADIR)/$(HS_COMMON_SRC_REL)
 HS_ALT_SRC=$(GAMMADIR)/$(HS_ALT_SRC_REL)
 
-
 ## altsrc-equiv 
 # 
 # Convert a common source path to an alternative source path
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/make/jdk6_hotspot_distro	Fri May 06 11:36:25 2011 -0700
@@ -0,0 +1,32 @@
+# 
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+# 
+
+#
+# This file format must remain compatible with both
+# GNU Makefile and Microsoft nmake formats.
+#
+
+# Don't put quotes around these values (quotes break the Windows build).
+HOTSPOT_VM_DISTRO=Java HotSpot(TM)
+COMPANY_NAME=Sun Microsystems, Inc.
+PRODUCT_NAME=Java(TM) Platform SE
--- a/hotspot/make/linux/makefiles/gcc.make	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/make/linux/makefiles/gcc.make	Fri May 06 11:36:25 2011 -0700
@@ -205,7 +205,7 @@
 SHARED_FLAG = -shared
 
 # Keep symbols even they are not used
-AOUT_FLAGS += -export-dynamic
+AOUT_FLAGS += -Xlinker -export-dynamic
 
 #------------------------------------------------------------------------
 # Debug flags
--- a/hotspot/make/linux/makefiles/vm.make	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/make/linux/makefiles/vm.make	Fri May 06 11:36:25 2011 -0700
@@ -102,6 +102,10 @@
 CFLAGS += $(EXTRA_CFLAGS)
 LFLAGS += $(EXTRA_CFLAGS)
 
+# Don't set the executable bit on the stack segment;
+# the same could be done with a separate execstack command.
+LFLAGS += -Xlinker -z -Xlinker noexecstack
+
 LIBS += -lm -ldl -lpthread
 
 # By default, link the *.o into the library, not the executable.
--- a/hotspot/make/solaris/makefiles/saproc.make	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/make/solaris/makefiles/saproc.make	Fri May 06 11:36:25 2011 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,30 @@
 SA_LFLAGS += -mt -xnolib -norunpath
 endif
 
+# The libproc Pstack_iter() interface changed in Nevada-B159.
+# This logic needs to match
+# agent/src/os/solaris/proc/saproc.cpp: set_has_newer_Pstack_iter():
+#   - skip SunOS 4 or older
+#   - skip Solaris 10 or older
+#   - skip two digit Nevada builds
+#   - skip three digit Nevada builds thru 149
+#   - skip Nevada builds 150-158
+SOLARIS_11_B159_OR_LATER := \
+$(shell uname -r -v \
+    | sed -n ' \
+          /^[0-3]\. /b \
+          /^5\.[0-9] /b \
+          /^5\.10 /b \
+          / snv_[0-9][0-9]$$/b \
+          / snv_[01][0-4][0-9]$$/b \
+          / snv_15[0-8]$$/b \
+          s/.*/-DSOLARIS_11_B159_OR_LATER/p \
+          ')
+
+# Uncomment the following to simulate building on Nevada-B159 or later
+# when actually building on Nevada-B158 or earlier:
+#SOLARIS_11_B159_OR_LATER=-DSOLARIS_11_B159_OR_LATER
+
 $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
 	$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
 	  echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
@@ -68,6 +92,7 @@
 	           -I$(GENERATED)                                       \
 	           -I$(BOOT_JAVA_HOME)/include                          \
 	           -I$(BOOT_JAVA_HOME)/include/$(Platform_os_family)    \
+	           $(SOLARIS_11_B159_OR_LATER)                          \
 	           $(SASRCFILES)                                        \
 	           $(SA_LFLAGS)                                         \
 	           -o $@                                                \
--- a/hotspot/make/solaris/makefiles/sparcWorks.make	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/make/solaris/makefiles/sparcWorks.make	Fri May 06 11:36:25 2011 -0700
@@ -100,11 +100,6 @@
 
 LINK_LIB.CC/PRE_HOOK += $(JVM_CHECK_SYMBOLS) || exit 1;
 
-# Some interfaces (_lwp_create) changed with LP64 and Solaris 7
-SOLARIS_7_OR_LATER := \
-$(shell uname -r | awk -F. '{ if ($$2 >= 7) print "-DSOLARIS_7_OR_LATER"; }')
-CFLAGS += ${SOLARIS_7_OR_LATER}
-
 # New architecture options started in SS12 (5.9), we need both styles to build.
 #   The older arch options for SS11 (5.8) or older and also for /usr/ccs/bin/as.
 #   Note: default for 32bit sparc is now the same as v8plus, so the
--- a/hotspot/make/windows/build.make	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/make/windows/build.make	Fri May 06 11:36:25 2011 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,25 @@
 # or make/hotspot_distro.
 !ifndef HOTSPOT_VM_DISTRO
 !if exists($(WorkSpace)\src\closed)
+
+# If the build is for JDK 6 or an earlier version, it should include
+# jdk6_hotspot_distro instead of hotspot_distro.
+JDK6_OR_EARLIER=0
+!if "$(JDK_MAJOR_VERSION)" != "" && "$(JDK_MINOR_VERSION)" != "" && "$(JDK_MICRO_VERSION)" != ""
+!if $(JDK_MAJOR_VERSION) == 1 && $(JDK_MINOR_VERSION) < 7
+JDK6_OR_EARLIER=1
+!endif
+!else
+!if $(JDK_MAJOR_VER) == 1 && $(JDK_MINOR_VER) < 7
+JDK6_OR_EARLIER=1
+!endif
+!endif
+
+!if $(JDK6_OR_EARLIER) == 1
+!include $(WorkSpace)\make\jdk6_hotspot_distro
+!else
 !include $(WorkSpace)\make\hotspot_distro
+!endif
 !else
 !include $(WorkSpace)\make\openjdk_distro
 !endif
@@ -260,7 +278,7 @@
 	@ echo Variant=$(realVariant)				>> $@
 	@ echo WorkSpace=$(WorkSpace)				>> $@
 	@ echo BootStrapDir=$(BootStrapDir)			>> $@
-        @ if "$(USERNAME)" NEQ "" echo BuildUser=$(USERNAME)	>> $@
+	@ if "$(USERNAME)" NEQ "" echo BuildUser=$(USERNAME)	>> $@
 	@ echo HS_VER=$(HS_VER)					>> $@
 	@ echo HS_DOTVER=$(HS_DOTVER)				>> $@
 	@ echo HS_COMPANY=$(COMPANY_NAME)			>> $@
--- a/hotspot/src/os/linux/vm/globals_linux.hpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/os/linux/vm/globals_linux.hpp	Fri May 06 11:36:25 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@
 // Defines Linux-specific default values. The flags are available on all
 // platforms, but they may have different default values on other platforms.
 //
-define_pd_global(bool, UseLargePages, false);
+define_pd_global(bool, UseLargePages, true);
 define_pd_global(bool, UseLargePagesIndividualAllocation, false);
 define_pd_global(bool, UseOSErrorReporting, false);
 define_pd_global(bool, UseThreadPriorities, true) ;
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Fri May 06 11:36:25 2011 -0700
@@ -2914,16 +2914,21 @@
 
 static size_t _large_page_size = 0;
 
-bool os::large_page_init() {
+void os::large_page_init() {
   if (!UseLargePages) {
     UseHugeTLBFS = false;
     UseSHM = false;
-    return false;
+    return;
   }
 
   if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
-    // Our user has not expressed a preference, so we'll try both.
-    UseHugeTLBFS = UseSHM = true;
+    // If UseLargePages is specified on the command line try both methods,
+    // if it's default, then try only HugeTLBFS.
+    if (FLAG_IS_DEFAULT(UseLargePages)) {
+      UseHugeTLBFS = true;
+    } else {
+      UseHugeTLBFS = UseSHM = true;
+    }
   }
 
   if (LargePageSizeInBytes) {
@@ -2978,7 +2983,6 @@
     _page_sizes[1] = default_page_size;
     _page_sizes[2] = 0;
   }
-
   UseHugeTLBFS = UseHugeTLBFS &&
                  Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
 
@@ -2988,12 +2992,6 @@
   UseLargePages = UseHugeTLBFS || UseSHM;
 
   set_coredump_filter();
-
-  // Large page support is available on 2.6 or newer kernel, some vendors
-  // (e.g. Redhat) have backported it to their 2.4 based distributions.
-  // We optimistically assume the support is available. If later it turns out
-  // not true, VM will automatically switch to use regular page size.
-  return true;
 }
 
 #ifndef SHM_HUGETLB
@@ -4118,7 +4116,7 @@
 #endif
   }
 
-  FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
+  os::large_page_init();
 
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri May 06 11:36:25 2011 -0700
@@ -3336,11 +3336,11 @@
   return true;
 }
 
-bool os::large_page_init() {
+void os::large_page_init() {
   if (!UseLargePages) {
     UseISM = false;
     UseMPSS = false;
-    return false;
+    return;
   }
 
   // print a warning if any large page related flag is specified on command line
@@ -3361,7 +3361,6 @@
             Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
 
   UseLargePages = UseISM || UseMPSS;
-  return UseLargePages;
 }
 
 bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
@@ -4992,7 +4991,7 @@
 #endif
 }
 
-  FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
+  os::large_page_init();
 
   // Check minimum allowable stack size for thread creation and to initialize
   // the java system classes, including StackOverflowError - depends on page
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Fri May 06 11:36:25 2011 -0700
@@ -2762,8 +2762,8 @@
   _hToken = NULL;
 }
 
-bool os::large_page_init() {
-  if (!UseLargePages) return false;
+void os::large_page_init() {
+  if (!UseLargePages) return;
 
   // print a warning if any large page related flag is specified on command line
   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
@@ -2808,7 +2808,7 @@
   }
 
   cleanup_after_large_page_init();
-  return success;
+  UseLargePages = success;
 }
 
 // On win32, one cannot release just a part of reserved memory, it's an
@@ -3561,7 +3561,7 @@
 #endif
 }
 
-  FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
+  os::large_page_init();
 
   // Setup Windows Exceptions
 
--- a/hotspot/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Fri May 06 11:36:25 2011 -0700
@@ -93,7 +93,7 @@
 
 inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
   __asm__ volatile (  "xchgb (%2),%0"
-                    : "=r" (v)
+                    : "=q" (v)
                     : "0" (v), "r" (p)
                     : "memory");
 }
@@ -155,7 +155,7 @@
 // Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
 inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
   __asm__ volatile (  "xchgb (%2),%0"
-                    : "=r" (v)
+                    : "=q" (v)
                     : "0" (v), "r" (p)
                     : "memory");
 }
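The two hunks above change the inline-asm output constraint for the byte-sized exchange from "=r" to "=q". On 32-bit x86 only eax/ebx/ecx/edx have 8-bit subregisters; with "r" the compiler may pick esi or edi, for which no xchgb form exists, and the assembly fails. A stand-alone sketch of the corrected constraint, assuming gcc-style inline asm on x86 (illustrative only):

    // Exchange a byte with memory; xchg with a memory operand is implicitly
    // locked, so this also acts as a full fence.
    inline void exchange_byte(volatile char* p, char v) {
      __asm__ volatile ("xchgb (%2),%0"
                        : "=q" (v)           // output must be a byte-addressable register
                        : "0" (v), "r" (p)
                        : "memory");
    }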
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri May 06 11:36:25 2011 -0700
@@ -826,6 +826,14 @@
 void ConcurrentMark::checkpointRootsInitialPost() {
   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 
+  // If we force an overflow during remark, the remark operation will
+  // actually abort and we'll restart concurrent marking. If we always
+  // force an overflow during remark we'll never actually complete the
+  // marking phase. So, we initialize this here, at the start of the
+  // cycle, so that the remaining overflow count will decrease at
+  // every remark and we'll eventually not need to cause one.
+  force_overflow_stw()->init();
+
   // For each region note start of marking.
   NoteStartOfMarkHRClosure startcl;
   g1h->heap_region_iterate(&startcl);
@@ -893,27 +901,37 @@
 }
 
 /*
-   Notice that in the next two methods, we actually leave the STS
-   during the barrier sync and join it immediately afterwards. If we
-   do not do this, this then the following deadlock can occur: one
-   thread could be in the barrier sync code, waiting for the other
-   thread to also sync up, whereas another one could be trying to
-   yield, while also waiting for the other threads to sync up too.
-
-   Because the thread that does the sync barrier has left the STS, it
-   is possible to be suspended for a Full GC or an evacuation pause
-   could occur. This is actually safe, since the entering the sync
-   barrier is one of the last things do_marking_step() does, and it
-   doesn't manipulate any data structures afterwards.
-*/
+ * Notice that in the next two methods, we actually leave the STS
+ * during the barrier sync and join it immediately afterwards. If we
+ * do not do this, the following deadlock can occur: one thread could
+ * be in the barrier sync code, waiting for the other thread to also
+ * sync up, whereas another one could be trying to yield, while also
+ * waiting for the other threads to sync up too.
+ *
+ * Note, however, that this code is also used during remark and in
+ * this case we should not attempt to leave / enter the STS, otherwise
+ * we'll either hit an assert (debug / fastdebug) or deadlock
+ * (product). So we should only leave / enter the STS if we are
+ * operating concurrently.
+ *
+ * Because the thread that does the sync barrier has left the STS, it
+ * is possible for it to be suspended for a Full GC or for an
+ * evacuation pause to occur. This is actually safe, since entering
+ * the sync barrier is one of the last things do_marking_step() does,
+ * and it doesn't manipulate any data structures afterwards.
+ */
 
 void ConcurrentMark::enter_first_sync_barrier(int task_num) {
   if (verbose_low())
     gclog_or_tty->print_cr("[%d] entering first barrier", task_num);
 
-  ConcurrentGCThread::stsLeave();
+  if (concurrent()) {
+    ConcurrentGCThread::stsLeave();
+  }
   _first_overflow_barrier_sync.enter();
-  ConcurrentGCThread::stsJoin();
+  if (concurrent()) {
+    ConcurrentGCThread::stsJoin();
+  }
   // at this point everyone should have synced up and not be doing any
   // more work
 
@@ -923,7 +941,12 @@
   // let task 0 do this
   if (task_num == 0) {
     // task 0 is responsible for clearing the global data structures
-    clear_marking_state();
+    // We should be here because of an overflow. During STW we should
+    // not clear the overflow flag since we rely on it being true when
+    // we exit this method to abort the pause and restart concurrent
+    // marking.
+    clear_marking_state(concurrent() /* clear_overflow */);
+    force_overflow()->update();
 
     if (PrintGC) {
       gclog_or_tty->date_stamp(PrintGCDateStamps);
@@ -940,15 +963,45 @@
   if (verbose_low())
     gclog_or_tty->print_cr("[%d] entering second barrier", task_num);
 
-  ConcurrentGCThread::stsLeave();
+  if (concurrent()) {
+    ConcurrentGCThread::stsLeave();
+  }
   _second_overflow_barrier_sync.enter();
-  ConcurrentGCThread::stsJoin();
+  if (concurrent()) {
+    ConcurrentGCThread::stsJoin();
+  }
   // at this point everything should be re-initialised and ready to go
 
   if (verbose_low())
     gclog_or_tty->print_cr("[%d] leaving second barrier", task_num);
 }
 
+#ifndef PRODUCT
+void ForceOverflowSettings::init() {
+  _num_remaining = G1ConcMarkForceOverflow;
+  _force = false;
+  update();
+}
+
+void ForceOverflowSettings::update() {
+  if (_num_remaining > 0) {
+    _num_remaining -= 1;
+    _force = true;
+  } else {
+    _force = false;
+  }
+}
+
+bool ForceOverflowSettings::should_force() {
+  if (_force) {
+    _force = false;
+    return true;
+  } else {
+    return false;
+  }
+}
+#endif // !PRODUCT
+
 void ConcurrentMark::grayRoot(oop p) {
   HeapWord* addr = (HeapWord*) p;
   // We can't really check against _heap_start and _heap_end, since it
@@ -1117,6 +1170,7 @@
   _restart_for_overflow = false;
 
   size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
+  force_overflow_conc()->init();
   set_phase(active_workers, true /* concurrent */);
 
   CMConcurrentMarkingTask markingTask(this, cmThread());
@@ -1845,7 +1899,7 @@
   while (!_cleanup_list.is_empty()) {
     HeapRegion* hr = _cleanup_list.remove_head();
     assert(hr != NULL, "the list was not empty");
-    hr->rem_set()->clear();
+    hr->par_clear();
     tmp_free_list.add_as_tail(hr);
 
     // Instead of adding one region at a time to the secondary_free_list,
@@ -2703,12 +2757,16 @@
 
 }
 
-void ConcurrentMark::clear_marking_state() {
+void ConcurrentMark::clear_marking_state(bool clear_overflow) {
   _markStack.setEmpty();
   _markStack.clear_overflow();
   _regionStack.setEmpty();
   _regionStack.clear_overflow();
-  clear_has_overflown();
+  if (clear_overflow) {
+    clear_has_overflown();
+  } else {
+    assert(has_overflown(), "pre-condition");
+  }
   _finger = _heap_start;
 
   for (int i = 0; i < (int)_max_task_num; ++i) {
@@ -4279,6 +4337,15 @@
     }
   }
 
+  // If we are about to wrap up and go into termination, check if we
+  // should raise the overflow flag.
+  if (do_termination && !has_aborted()) {
+    if (_cm->force_overflow()->should_force()) {
+      _cm->set_has_overflown();
+      regular_clock_call();
+    }
+  }
+
   // We still haven't aborted. Now, let's try to get into the
   // termination protocol.
   if (do_termination && !has_aborted()) {
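The ForceOverflowSettings hooks added above exist purely for testing: G1ConcMarkForceOverflow gives a budget of artificial overflows, update() arms one per restart, and should_force() consumes it just before termination. A stand-alone sketch (hypothetical names, non-product behaviour only, not part of this changeset) of how that budget winds down so marking eventually completes:

    #include <cstdio>

    struct ForceOverflowSketch {            // hypothetical stand-in for the real class
      unsigned num_remaining;
      bool force;
      void init(unsigned n) { num_remaining = n; force = false; update(); }
      void update() {
        if (num_remaining > 0) { num_remaining -= 1; force = true; }
        else                   { force = false; }
      }
      bool should_force() { if (force) { force = false; return true; } return false; }
    };

    int main() {
      ForceOverflowSketch f;
      f.init(2);                            // as if -XX:G1ConcMarkForceOverflow=2
      for (int remark = 1; remark <= 4; remark++) {
        std::printf("remark %d: forced overflow = %d\n", remark, (int) f.should_force());
        f.update();                         // re-armed after each restart, until exhausted
      }
      return 0;
    }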
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri May 06 11:36:25 2011 -0700
@@ -316,6 +316,19 @@
   void setEmpty()   { _index = 0; clear_overflow(); }
 };
 
+class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
+private:
+#ifndef PRODUCT
+  uintx _num_remaining;
+  bool _force;
+#endif // !defined(PRODUCT)
+
+public:
+  void init() PRODUCT_RETURN;
+  void update() PRODUCT_RETURN;
+  bool should_force() PRODUCT_RETURN_( return false; );
+};
+
 // this will enable a variety of different statistics per GC task
 #define _MARKING_STATS_       0
 // this will enable the higher verbose levels
@@ -462,6 +475,9 @@
 
   WorkGang* _parallel_workers;
 
+  ForceOverflowSettings _force_overflow_conc;
+  ForceOverflowSettings _force_overflow_stw;
+
   void weakRefsWork(bool clear_all_soft_refs);
 
   void swapMarkBitMaps();
@@ -470,7 +486,7 @@
   // task local ones; should be called during initial mark.
   void reset();
   // It resets all the marking data structures.
-  void clear_marking_state();
+  void clear_marking_state(bool clear_overflow = true);
 
   // It should be called to indicate which phase we're in (concurrent
   // mark or remark) and how many threads are currently active.
@@ -547,6 +563,22 @@
   void enter_first_sync_barrier(int task_num);
   void enter_second_sync_barrier(int task_num);
 
+  ForceOverflowSettings* force_overflow_conc() {
+    return &_force_overflow_conc;
+  }
+
+  ForceOverflowSettings* force_overflow_stw() {
+    return &_force_overflow_stw;
+  }
+
+  ForceOverflowSettings* force_overflow() {
+    if (concurrent()) {
+      return force_overflow_conc();
+    } else {
+      return force_overflow_stw();
+    }
+  }
+
 public:
   // Manipulation of the global mark stack.
   // Notice that the first mark_stack_push is CAS-based, whereas the
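The ForceOverflowSettings declarations above lean on the PRODUCT_RETURN macros so the hooks compile to empty inline stubs in product builds, while debug builds pick up the real bodies from concurrentMark.cpp. A sketch of that pattern, assuming the macros behave as in HotSpot's utilities/macros.hpp (not part of this changeset):

    #ifdef PRODUCT
    #define PRODUCT_RETURN          {}
    #define PRODUCT_RETURN_(code)   { code }
    #else
    #define PRODUCT_RETURN          /* next token must be ';' */
    #define PRODUCT_RETURN_(code)   /* next token must be ';' */
    #endif

    class ForceOverflowSettingsSketch {     // hypothetical mirror of the declarations above
    public:
      void init()         PRODUCT_RETURN;   // empty body in product builds,
      void update()       PRODUCT_RETURN;   // declaration only otherwise
      bool should_force() PRODUCT_RETURN_( return false; );
    };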
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri May 06 11:36:25 2011 -0700
@@ -3975,6 +3975,9 @@
 oop
 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
                                                oop old) {
+  assert(obj_in_cs(old),
+         err_msg("obj: "PTR_FORMAT" should still be in the CSet",
+                 (HeapWord*) old));
   markOop m = old->mark();
   oop forward_ptr = old->forward_to_atomic(old);
   if (forward_ptr == NULL) {
@@ -3997,7 +4000,13 @@
     }
     return old;
   } else {
-    // Someone else had a place to copy it.
+    // Forward-to-self failed. Either someone else managed to allocate
+    // space for this object (old != forward_ptr) or they beat us in
+    // self-forwarding it (old == forward_ptr).
+    assert(old == forward_ptr || !obj_in_cs(forward_ptr),
+           err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
+                   "should not be in the CSet",
+                   (HeapWord*) old, (HeapWord*) forward_ptr));
     return forward_ptr;
   }
 }
@@ -4308,11 +4317,10 @@
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop(heap_oop);
-    assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
-           "shouldn't still be in the CSet if evacuation didn't fail.");
     HeapWord* addr = (HeapWord*)obj;
-    if (_g1->is_in_g1_reserved(addr))
+    if (_g1->is_in_g1_reserved(addr)) {
       _cm->grayRoot(oop(addr));
+    }
   }
 }
 
@@ -4961,36 +4969,45 @@
 
 #ifndef PRODUCT
 class G1VerifyCardTableCleanup: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   CardTableModRefBS* _ct_bs;
 public:
-  G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
-    : _ct_bs(ct_bs) { }
+  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
+    : _g1h(g1h), _ct_bs(ct_bs) { }
   virtual bool doHeapRegion(HeapRegion* r) {
-    MemRegion mr(r->bottom(), r->end());
     if (r->is_survivor()) {
-      _ct_bs->verify_dirty_region(mr);
+      _g1h->verify_dirty_region(r);
     } else {
-      _ct_bs->verify_clean_region(mr);
+      _g1h->verify_not_dirty_region(r);
     }
     return false;
   }
 };
 
+void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
+  // All of the region should be clean.
+  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
+  MemRegion mr(hr->bottom(), hr->end());
+  ct_bs->verify_not_dirty_region(mr);
+}
+
+void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
+  // We cannot guarantee that [bottom(),end()] is dirty.  Threads
+  // dirty allocated blocks as they allocate them. The thread that
+  // retires each region and replaces it with a new one will do a
+  // maximal allocation to fill in [pre_dummy_top(),end()] but will
+  // not dirty that area (one less thing to have to do while holding
+  // a lock). So we can only verify that [bottom(),pre_dummy_top()]
+  // is dirty.
+  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
+  MemRegion mr(hr->bottom(), hr->pre_dummy_top());
+  ct_bs->verify_dirty_region(mr);
+}
+
 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
+  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
-    // We cannot guarantee that [bottom(),end()] is dirty.  Threads
-    // dirty allocated blocks as they allocate them. The thread that
-    // retires each region and replaces it with a new one will do a
-    // maximal allocation to fill in [pre_dummy_top(),end()] but will
-    // not dirty that area (one less thing to have to do while holding
-    // a lock). So we can only verify that [bottom(),pre_dummy_top()]
-    // is dirty. Also note that verify_dirty_region() requires
-    // mr.start() and mr.end() to be card aligned and pre_dummy_top()
-    // is not guaranteed to be.
-    MemRegion mr(hr->bottom(),
-                 ct_bs->align_to_card_boundary(hr->pre_dummy_top()));
-    ct_bs->verify_dirty_region(mr);
+    verify_dirty_region(hr);
   }
 }
 
@@ -5033,7 +5050,7 @@
   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
 #ifndef PRODUCT
   if (G1VerifyCTCleanup || VerifyAfterGC) {
-    G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
+    G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
     heap_region_iterate(&cleanup_verifier);
   }
 #endif
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri May 06 11:36:25 2011 -0700
@@ -970,6 +970,8 @@
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }
 
+  void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
+  void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
   void verify_dirty_young_regions() PRODUCT_RETURN;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri May 06 11:36:25 2011 -0700
@@ -157,7 +157,6 @@
   void set_try_claimed() { _try_claimed = true; }
 
   void scanCard(size_t index, HeapRegion *r) {
-    _cards_done++;
     DirtyCardToOopClosure* cl =
       r->new_dcto_closure(_oc,
                          CardTableModRefBS::Precise,
@@ -168,17 +167,14 @@
     HeapWord* card_start = _bot_shared->address_for_index(index);
     HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
     Space *sp = SharedHeap::heap()->space_containing(card_start);
-    MemRegion sm_region;
-    if (ParallelGCThreads > 0) {
-      // first find the used area
-      sm_region = sp->used_region_at_save_marks();
-    } else {
-      // The closure is not idempotent.  We shouldn't look at objects
-      // allocated during the GC.
-      sm_region = sp->used_region_at_save_marks();
-    }
+    MemRegion sm_region = sp->used_region_at_save_marks();
     MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
-    if (!mr.is_empty()) {
+    if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
+      // We make the card as "claimed" lazily (so races are possible
+      // but they're benign), which reduces the number of duplicate
+      // scans (the rsets of the regions in the cset can intersect).
+      _ct_bs->set_card_claimed(index);
+      _cards_done++;
       cl->do_MemRegion(mr);
     }
   }
@@ -199,6 +195,9 @@
     HeapRegionRemSet* hrrs = r->rem_set();
     if (hrrs->iter_is_complete()) return false; // All done.
     if (!_try_claimed && !hrrs->claim_iter()) return false;
+    // If we ever free the collection set concurrently, we should also
+    // clear the card table concurrently, and therefore we won't need to
+    // add regions of the collection set to the dirty cards region.
     _g1h->push_dirty_cards_region(r);
     // If we didn't return above, then
     //   _try_claimed || r->claim_iter()
@@ -230,15 +229,10 @@
         _g1h->push_dirty_cards_region(card_region);
       }
 
-       // If the card is dirty, then we will scan it during updateRS.
-      if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
-        // We make the card as "claimed" lazily (so races are possible but they're benign),
-        // which reduces the number of duplicate scans (the rsets of the regions in the cset
-        // can intersect).
-        if (!_ct_bs->is_card_claimed(card_index)) {
-          _ct_bs->set_card_claimed(card_index);
-          scanCard(card_index, card_region);
-        }
+      // If the card is dirty, then we will scan it during updateRS.
+      if (!card_region->in_collection_set() &&
+          !_ct_bs->is_card_dirty(card_index)) {
+        scanCard(card_index, card_region);
       }
     }
     if (!_try_claimed) {
@@ -246,8 +240,6 @@
     }
     return false;
   }
-  // Set all cards back to clean.
-  void cleanup() {_g1h->cleanUpCardTable();}
   size_t cards_done() { return _cards_done;}
   size_t cards_looked_up() { return _cards;}
 };
@@ -566,8 +558,9 @@
     update_rs_cl.set_region(r);
     HeapWord* stop_point =
       r->oops_on_card_seq_iterate_careful(scanRegion,
-                                        &filter_then_update_rs_cset_oop_cl,
-                                        false /* filter_young */);
+                                          &filter_then_update_rs_cset_oop_cl,
+                                          false /* filter_young */,
+                                          NULL  /* card_ptr */);
 
     // Since this is performed in the event of an evacuation failure, we
     // we shouldn't see a non-null stop point
@@ -735,12 +728,6 @@
                                 (OopClosure*)&mux :
                                 (OopClosure*)&update_rs_oop_cl));
 
-  // Undirty the card.
-  *card_ptr = CardTableModRefBS::clean_card_val();
-  // We must complete this write before we do any of the reads below.
-  OrderAccess::storeload();
-  // And process it, being careful of unallocated portions of TLAB's.
-
   // The region for the current card may be a young region. The
   // current card may have been a card that was evicted from the
   // card cache. When the card was inserted into the cache, we had
@@ -749,7 +736,7 @@
   // and tagged as young.
   //
   // We wish to filter out cards for such a region but the current
-  // thread, if we're running conucrrently, may "see" the young type
+  // thread, if we're running concurrently, may "see" the young type
   // change at any time (so an earlier "is_young" check may pass or
   // fail arbitrarily). We tell the iteration code to perform this
   // filtering when it has been determined that there has been an actual
@@ -759,7 +746,8 @@
   HeapWord* stop_point =
     r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                         &filter_then_update_rs_oop_cl,
-                                        filter_young);
+                                        filter_young,
+                                        card_ptr);
 
   // If stop_point is non-null, then we encountered an unallocated region
   // (perhaps the unfilled portion of a TLAB.)  For now, we'll dirty the
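The scanCard()/doHeapRegion() changes above move the claim check into scanCard() and keep it deliberately unsynchronized: if two threads race past the check, the worst case is one duplicate scan of a card, never a missed one, which is cheaper than synchronizing. A stand-alone sketch of that benign-race claim, with a hypothetical structure in place of the real card table (not part of this changeset):

    #include <cstddef>
    #include <vector>

    struct CardClaimSketch {
      std::vector<unsigned char> claimed;
      explicit CardClaimSketch(std::size_t cards) : claimed(cards, 0) {}

      template <typename ScanFn>
      void maybe_scan(std::size_t index, ScanFn scan) {
        if (!claimed[index]) {               // unsynchronized check: two threads can
          claimed[index] = 1;                // both pass and both scan, which only
          scan(index);                       // wastes work; it never loses a card
        }
      }
    };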
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri May 06 11:36:25 2011 -0700
@@ -311,7 +311,11 @@
                                                                             \
   develop(bool, G1ExitOnExpansionFailure, false,                            \
           "Raise a fatal VM exit out of memory failure in the event "       \
-          " that heap expansion fails due to running out of swap.")
+          " that heap expansion fails due to running out of swap.")         \
+                                                                            \
+  develop(uintx, G1ConcMarkForceOverflow, 0,                                \
+          "The number of times we'll force an overflow during "             \
+          "concurrent marking")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri May 06 11:36:25 2011 -0700
@@ -376,6 +376,17 @@
   if (clear_space) clear(SpaceDecorator::Mangle);
 }
 
+void HeapRegion::par_clear() {
+  assert(used() == 0, "the region should have been already cleared");
+  assert(capacity() == (size_t) HeapRegion::GrainBytes,
+         "should be back to normal");
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->clear();
+  CardTableModRefBS* ct_bs =
+                   (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
+  ct_bs->clear(MemRegion(bottom(), end()));
+}
+
 // <PREDICTION>
 void HeapRegion::calc_gc_efficiency() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -600,7 +611,15 @@
 HeapRegion::
 oops_on_card_seq_iterate_careful(MemRegion mr,
                                  FilterOutOfRegionClosure* cl,
-                                 bool filter_young) {
+                                 bool filter_young,
+                                 jbyte* card_ptr) {
+  // Currently, we should only have to clean the card if filter_young
+  // is true and vice versa.
+  if (filter_young) {
+    assert(card_ptr != NULL, "pre-condition");
+  } else {
+    assert(card_ptr == NULL, "pre-condition");
+  }
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // If we're within a stop-world GC, then we might look at a card in a
@@ -626,6 +645,15 @@
 
   assert(!is_young(), "check value of filter_young");
 
+  // We can only clean the card here, after we make the decision that
+  // the card is not young. And we only clean the card if we have been
+  // asked to (i.e., card_ptr != NULL).
+  if (card_ptr != NULL) {
+    *card_ptr = CardTableModRefBS::clean_card_val();
+    // We must complete this write before we do any of the reads below.
+    OrderAccess::storeload();
+  }
+
   // We used to use "block_start_careful" here.  But we're actually happy
   // to update the BOT while we do this...
   HeapWord* cur = block_start(mr.start());
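The card_ptr parameter added above lets the region clean the card only after deciding it is not young, and the "complete this write before we do any of the reads below" comment describes the clean-then-StoreLoad-then-scan ordering: an update that lands while we scan re-dirties the card and is picked up later, and the fence keeps the clean from being reordered past our reads. A stand-alone sketch of that ordering, with illustrative names and values only (not part of this changeset):

    #include <atomic>

    typedef signed char jbyte_sketch;          // stand-in for HotSpot's jbyte
    const jbyte_sketch clean_card_sketch = -1; // illustrative clean value

    inline void clean_then_scan(volatile jbyte_sketch* card_ptr,
                                void (*scan_card_contents)()) {
      *card_ptr = clean_card_sketch;                        // 1. clean the card first
      std::atomic_thread_fence(std::memory_order_seq_cst);  // 2. StoreLoad barrier
                                                            //    (stands in for OrderAccess::storeload())
      scan_card_contents();                                 // 3. only now read the heap;
                                                            //    concurrent writers re-dirty the card
    }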
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri May 06 11:36:25 2011 -0700
@@ -584,6 +584,7 @@
 
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space);
+  void par_clear();
 
   void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 
@@ -802,12 +803,16 @@
   HeapWord*
   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
 
-  // In this version - if filter_young is true and the region
-  // is a young region then we skip the iteration.
+  // filter_young: if true and the region is a young region then we
+  // skip the iteration.
+  // card_ptr: if not NULL, and we decide that the card is not young
+  // and we iterate over it, we'll clean the card before we start the
+  // iteration.
   HeapWord*
   oops_on_card_seq_iterate_careful(MemRegion mr,
                                    FilterOutOfRegionClosure* cl,
-                                   bool filter_young);
+                                   bool filter_young,
+                                   jbyte* card_ptr);
 
   // A version of block start that is guaranteed to find *some* block
   // boundary at or before "p", but does not object iteration, and may
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Fri May 06 11:36:25 2011 -0700
@@ -224,6 +224,12 @@
   const size_t alignment = virtual_space()->alignment();
   size_t aligned_bytes  = align_size_up(bytes, alignment);
   size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
+
+  if (UseNUMA) {
+    // With NUMA we use round-robin page allocation for the old gen. Expand by
+    // at least enough to provide one page per lgroup. Alignment is larger than
+    // or equal to the page size.
+    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
+  }
   if (aligned_bytes == 0){
     // The alignment caused the number of bytes to wrap.  An expand_by(0) will
     // return true with the implication that and expansion was done when it
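A small worked example of the NUMA clause above with assumed numbers (2M old-gen alignment, 4 lgroups), showing how a minimal expansion request is rounded up so every lgroup receives at least one page under round-robin allocation (not part of this changeset):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t alignment = 2u * 1024 * 1024;   // assumed 2M alignment
      const std::size_t lgroups   = 4;                  // assumed 4 NUMA lgroups
      std::size_t expand_bytes    = alignment;          // a minimal expansion request

      // Mirror of the MAX2(...) clause: never expand by less than one page per lgroup.
      expand_bytes = std::max(expand_bytes, alignment * lgroups);

      std::printf("expand by %zu bytes (%zu per lgroup)\n",
                  expand_bytes, expand_bytes / lgroups);
      return 0;
    }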
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Fri May 06 11:36:25 2011 -0700
@@ -327,6 +327,7 @@
 
   // 1. check if klass is not interface
   if (resolved_klass->is_interface()) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected", Klass::cast(resolved_klass())->external_name());
     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
@@ -413,6 +414,7 @@
 
  // check if klass is interface
   if (!resolved_klass->is_interface()) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "Found class %s, but interface was expected", Klass::cast(resolved_klass())->external_name());
     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
@@ -534,6 +536,7 @@
 
   // check for errors
   if (is_static != fd.is_static()) {
+    ResourceMark rm(THREAD);
     char msg[200];
     jio_snprintf(msg, sizeof(msg), "Expected %s field %s.%s", is_static ? "static" : "non-static", Klass::cast(resolved_klass())->external_name(), fd.name()->as_C_string());
     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), msg);
@@ -631,6 +634,7 @@
 
   // check if static
   if (!resolved_method->is_static()) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "Expected static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
                                                       resolved_method->name(),
@@ -671,6 +675,7 @@
 
   // check if not static
   if (resolved_method->is_static()) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf),
                  "Expecting non-static method %s",
@@ -717,6 +722,7 @@
 
   // check if not static
   if (sel_method->is_static()) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
                                                                                                              resolved_method->name(),
@@ -757,6 +763,7 @@
 
   // check if not static
   if (resolved_method->is_static()) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
                                                                                                              resolved_method->name(),
@@ -873,6 +880,7 @@
 
   // check if receiver klass implements the resolved interface
   if (!recv_klass->is_subtype_of(resolved_klass())) {
+    ResourceMark rm(THREAD);
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s",
                  (Klass::cast(recv_klass()))->external_name(),
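Each error path above now creates a ResourceMark before calling external_name() / name_and_sig_as_C_string(), which build their C strings in the thread's resource area; the mark rolls the arena back when the scope exits, so these exception paths no longer accumulate arena space. The scoping idea, reduced to a hypothetical stand-alone sketch (not HotSpot's actual ResourceMark):

    #include <cstddef>

    struct ArenaSketch {                        // hypothetical bump allocator
      char        buf[4096];
      std::size_t top;
      ArenaSketch() : top(0) {}
      char* alloc(std::size_t n) { char* p = buf + top; top += n; return p; }
    };

    struct ResourceMarkSketch {                 // hypothetical stand-in for ResourceMark
      ArenaSketch& arena;
      std::size_t  saved;
      explicit ResourceMarkSketch(ArenaSketch& a) : arena(a), saved(a.top) {}
      ~ResourceMarkSketch() { arena.top = saved; }   // roll back on scope exit
    };

    void error_path(ArenaSketch& arena) {
      ResourceMarkSketch rm(arena);             // like 'ResourceMark rm(THREAD);'
      char* msg = arena.alloc(200);             // like external_name()'s temporary string
      (void) msg;                               // ... format the message and throw ...
    }                                           // rm's destructor releases the 200 bytes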
--- a/hotspot/src/share/vm/memory/allocation.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Fri May 06 11:36:25 2011 -0700
@@ -44,6 +44,14 @@
   return (void *) AllocateHeap(size, "CHeapObj-new");
 }
 
+void* CHeapObj::operator new (size_t size, const std::nothrow_t&  nothrow_constant) {
+  char* p = (char*) os::malloc(size);
+#ifdef ASSERT
+  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
+#endif
+  return p;
+}
+
 void CHeapObj::operator delete(void* p){
  FreeHeap(p);
 }
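The added overload gives CHeapObj a nothrow placement form, so call sites can write `new (std::nothrow) T()` and treat NULL as a recoverable failure instead of relying on the default operator new. A hypothetical usage sketch (the real overload forwards to os::malloc; plain malloc merely stands in here, and SomeCHeapThing is an invented type):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct CHeapObjSketch {                     // hypothetical mirror of CHeapObj
      void* operator new(std::size_t size, const std::nothrow_t&) {
        return std::malloc(size);               // NULL on failure instead of aborting
      }
      void operator delete(void* p) { std::free(p); }
    };

    struct SomeCHeapThing : CHeapObjSketch { int payload; };

    void try_allocate() {
      SomeCHeapThing* t = new (std::nothrow) SomeCHeapThing();
      if (t == NULL) {
        return;                                 // failure is reported, not fatal
      }
      t->payload = 42;
      delete t;
    }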
--- a/hotspot/src/share/vm/memory/allocation.hpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Fri May 06 11:36:25 2011 -0700
@@ -34,6 +34,8 @@
 #include "opto/c2_globals.hpp"
 #endif
 
+#include <new>
+
 #define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
 #define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
 #define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
@@ -99,6 +101,7 @@
 class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
   void* operator new(size_t size);
+  void* operator new (size_t size, const std::nothrow_t&  nothrow_constant);
   void  operator delete(void* p);
   void* new_array(size_t size);
 };
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Fri May 06 11:36:25 2011 -0700
@@ -652,43 +652,37 @@
 }
 
 #ifndef PRODUCT
-class GuaranteeNotModClosure: public MemRegionClosure {
-  CardTableModRefBS* _ct;
-public:
-  GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
-  void do_MemRegion(MemRegion mr) {
-    jbyte* entry = _ct->byte_for(mr.start());
-    guarantee(*entry != CardTableModRefBS::clean_card,
-              "Dirty card in region that should be clean");
+void CardTableModRefBS::verify_region(MemRegion mr,
+                                      jbyte val, bool val_equals) {
+  jbyte* start    = byte_for(mr.start());
+  jbyte* end      = byte_for(mr.last());
+  bool   failures = false;
+  for (jbyte* curr = start; curr <= end; ++curr) {
+    jbyte curr_val = *curr;
+    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
+    if (failed) {
+      if (!failures) {
+        tty->cr();
+        tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]");
+        tty->print_cr("==   %sexpecting value: %d",
+                      (val_equals) ? "" : "not ", val);
+        failures = true;
+      }
+      tty->print_cr("==   card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
+                    "val: %d", curr, addr_for(curr),
+                    (HeapWord*) (((size_t) addr_for(curr)) + card_size),
+                    (int) curr_val);
+    }
   }
-};
-
-void CardTableModRefBS::verify_clean_region(MemRegion mr) {
-  GuaranteeNotModClosure blk(this);
-  non_clean_card_iterate_serial(mr, &blk);
+  guarantee(!failures, "there should not have been any failures");
 }
 
-// To verify a MemRegion is entirely dirty this closure is passed to
-// dirty_card_iterate. If the region is dirty do_MemRegion will be
-// invoked only once with a MemRegion equal to the one being
-// verified.
-class GuaranteeDirtyClosure: public MemRegionClosure {
-  CardTableModRefBS* _ct;
-  MemRegion _mr;
-  bool _result;
-public:
-  GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr)
-    : _ct(ct), _mr(mr), _result(false) {}
-  void do_MemRegion(MemRegion mr) {
-    _result = _mr.equals(mr);
-  }
-  bool result() const { return _result; }
-};
+void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
+  verify_region(mr, dirty_card, false /* val_equals */);
+}
 
 void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
-  GuaranteeDirtyClosure blk(this, mr);
-  dirty_card_iterate(mr, &blk);
-  guarantee(blk.result(), "Non-dirty cards in region that should be dirty");
+  verify_region(mr, dirty_card, true /* val_equals */);
 }
 #endif
 
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Fri May 06 11:36:25 2011 -0700
@@ -475,7 +475,10 @@
   void verify();
   void verify_guard();
 
-  void verify_clean_region(MemRegion mr) PRODUCT_RETURN;
+  // val_equals -> it will check that all cards covered by mr equal val
+  // !val_equals -> it will check that all cards covered by mr do not equal val
+  void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
+  void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
   void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
 
   static size_t par_chunk_heapword_alignment() {
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Fri May 06 11:36:25 2011 -0700
@@ -265,8 +265,6 @@
   MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
 
   always_do_update_barrier = UseConcMarkSweepGC;
-  BlockOffsetArrayUseUnallocatedBlock =
-      BlockOffsetArrayUseUnallocatedBlock || ParallelGCThreads > 0;
 
   // Check validity of heap flags
   assert(OldSize     % min_alignment() == 0, "old space alignment");
--- a/hotspot/src/share/vm/memory/modRefBarrierSet.hpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/memory/modRefBarrierSet.hpp	Fri May 06 11:36:25 2011 -0700
@@ -100,12 +100,6 @@
   // Pass along the argument to the superclass.
   ModRefBarrierSet(int max_covered_regions) :
     BarrierSet(max_covered_regions) {}
-
-#ifndef PRODUCT
-  // Verifies that the given region contains no modified references.
-  virtual void verify_clean_region(MemRegion mr) = 0;
-#endif
-
 };
 
 #endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP
--- a/hotspot/src/share/vm/prims/jvmti.xml	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/prims/jvmti.xml	Fri May 06 11:36:25 2011 -0700
@@ -280,10 +280,8 @@
    <!ELEMENT externallink (#PCDATA|jvmti|code|i|b|tm)*>
    <!ATTLIST externallink id CDATA #REQUIRED>
 
-   <!ELEMENT vmspeclink EMPTY>
-   <!ATTLIST vmspeclink id CDATA #IMPLIED>
-   <!ATTLIST vmspeclink name CDATA #IMPLIED>
-   <!ATTLIST vmspeclink preposition CDATA #IMPLIED>
+   <!ELEMENT vmspec EMPTY>
+   <!ATTLIST vmspec chapter CDATA #IMPLIED>
 
    <!ELEMENT internallink (#PCDATA|jvmti|code|i|b)*>
    <!ATTLIST internallink id CDATA #REQUIRED>
@@ -2285,9 +2283,8 @@
         Stack frames are referenced by depth.
         The frame at depth zero is the current frame.
         <p/>
-        Stack frames are as described in the 
-        <vmspeclink id="Overview.doc.html#17257"
-                    name="Frames section"/>.  
+        Stack frames are as described in
+        <vmspec chapter="3.6"/>,
         That is, they correspond to method 
         invocations (including native methods) but do not correspond to platform native or 
         VM internal frames.
@@ -2627,7 +2624,7 @@
         <param id="use_java_stack">
 	  <jboolean/>
 	  <description>
-	    Return the stack showing the <vmspeclink/>
+	    Return the stack showing <vmspec/>
 	    model of the stack; 
 	    otherwise, show the internal representation of the stack with
 	    inlined and optimized methods missing.  If the virtual machine
@@ -2707,7 +2704,7 @@
 	When the thread is resumed, the execution 
 	state of the thread is reset to the state
 	immediately before the called method was invoked.
-	That is (using the <vmspeclink/> terminology):
+	That is (using <vmspec/> terminology):
 	  <ul>
 	    <li>the current frame is discarded as the previous frame becomes the current one</li>
 	    <li>the operand stack is restored--the argument values are added back
@@ -2868,9 +2865,8 @@
       to return at any point during its execution.
       The method which will return early is referred to as the <i>called method</i>.
       The called method is the current method
-      (as defined by the 
-      <vmspeclink id="Overview.doc.html#17257"
-                  name="Frames section"/>) 
+      (as defined by
+      <vmspec chapter="3.6"/>) 
       for the specified thread at
       the time the function is called.
       <p/>
@@ -3576,10 +3572,8 @@
 	<field id="index">
 	  <jint/>
 	  <description>	    
-	    The index into the constant pool of the class. See the
-            <vmspeclink id="ClassFile.doc.html#20080"
-                        name="Constant Pool section"/>
-	    description.
+	    The index into the constant pool of the class. See the description in 
+      <vmspec chapter="4.4"/>.
 	  </description>
 	</field>
       </typedef>
@@ -5006,9 +5000,8 @@
 	    For references of this kind the <code>referrer_index</code>
             parameter to the <internallink id="jvmtiObjectReferenceCallback">
             jvmtiObjectReferenceCallback</internallink> is the index into
-            constant pool table of the class, starting at 1. See the
-            <vmspeclink id="ClassFile.doc.html#20080"
-                        name="Constant Pool section"/>
+            constant pool table of the class, starting at 1. See
+            <vmspec chapter="4.4"/>.
 	  </constant>
 	</constants>
 
@@ -6441,9 +6434,7 @@
 	been recorded as an initiating loader. Each 
 	class in the returned array was created by this class loader, 
 	either by defining it directly or by delegation to another class loader.
-        See the 
-        <vmspeclink id="ConstantPool.doc.html#72007"
-                    name="Creation and Loading section"/>.
+	See <vmspec chapter="5.3"/>.
 	<p/>
 	For JDK version 1.1 implementations that don't
 	recognize the distinction between initiating and defining class loaders,
@@ -6626,9 +6617,7 @@
 	For the class indicated by <code>klass</code>, return the access
 	flags
 	via <code>modifiers_ptr</code>.
-	Access flags are defined in the 
-        <vmspeclink id="ClassFile.doc.html"
-                    name="Class File Format chapter"/>.
+	Access flags are defined in <vmspec chapter="4"/>.
 	<p/>
 	If the class is an array class, then its public, private, and protected 
 	modifiers are the same as those of its component type. For arrays of 
@@ -6794,9 +6783,8 @@
       <description>
         For the class indicated by <code>klass</code>, 
         return the minor and major version numbers,
-        as defined in the
-        <vmspeclink id="ClassFile.doc.html"
-                        name="Class File Format chapter"/>.
+        as defined in
+        <vmspec chapter="4"/>. 
       </description>
       <origin>new</origin>
       <capabilities>
@@ -6839,10 +6827,8 @@
       <description>
 	For the class indicated by <code>klass</code>, 
         return the raw bytes of the constant pool in the format of the
-        <code>constant_pool</code> item of the 
-        <vmspeclink id="ClassFile.doc.html"
-                    name="Class File Format"
-                    preposition="in"/>.
+        <code>constant_pool</code> item of 
+        <vmspec chapter="4"/>.
         The format of the constant pool may differ between versions
         of the Class File Format, so, the 
         <functionlink id="GetClassVersionNumbers">minor and major 
@@ -7286,9 +7272,7 @@
 	<field id="class_bytes">
 	  <inbuf incount="class_byte_count"><uchar/></inbuf>
 	  <description>
-            Bytes defining class (in the 
-            <vmspeclink id="ClassFile.doc.html"
-                        name="Class File Format"/>)
+            Bytes defining class (in <vmspec chapter="4"/>)
 	  </description>
 	</field>
       </typedef>
@@ -7611,10 +7595,8 @@
 	<paramlink id="signature_ptr"/>.
 	<p/>
         Field signatures are defined in the JNI Specification and 
-        are referred to as 
-        <vmspeclink id="ClassFile.doc.html#14152"
-                    name="field descriptors"
-                    preposition="in"/>.
+        are referred to as <code>field descriptors</code> in
+        <vmspec chapter="4.3.2"/>.
       </description>
       <origin>jvmdiClone</origin>
       <capabilities>
@@ -7709,9 +7691,7 @@
       <description>
 	For the field indicated by <code>klass</code> and <code>field</code>
 	return the access flags via <code>modifiers_ptr</code>.
-	Access flags are defined in the 
-        <vmspeclink id="ClassFile.doc.html"
-                    name="Class File Format chapter"/>.
+	Access flags are defined in <vmspec chapter="4"/>.
       </description>
       <origin>jvmdi</origin>
       <capabilities>
@@ -7810,10 +7790,9 @@
 	return the method name via <code>name_ptr</code> and method signature via
 	<code>signature_ptr</code>.
         <p/>
-        Method signatures are defined in the JNI Specification and are referred to as
-        <vmspeclink id="ClassFile.doc.html#7035"
-                    name="method descriptors"
-                    preposition="in"/>.
+        Method signatures are defined in the JNI Specification and are 
+        referred to as <code>method descriptors</code> in 
+        <vmspec chapter="4.3.3"/>.
 	Note this is different
 	than method signatures as defined in the <i>Java Language Specification</i>.
       </description>
@@ -7902,9 +7881,7 @@
       <description>
 	For the method indicated by <code>method</code>,
 	return the access flags via <code>modifiers_ptr</code>.
-	Access flags are defined in the 
-        <vmspeclink id="ClassFile.doc.html"
-                    name="Class File Format chapter"/>.
+	Access flags are defined in <vmspec chapter="4"/>.
       </description>
       <origin>jvmdi</origin>
       <capabilities>
@@ -7941,9 +7918,7 @@
 	  including the local variables used to pass parameters to the
 	  method on its invocation. 
 	  <p/>
-	  See <code>max_locals</code> in the    
-          <vmspeclink id="ClassFile.doc.html#1546"
-                      name="Code Attribute section"/>.
+	  See <code>max_locals</code> in <vmspec chapter="4.7.3"/>.
       </description>
       <origin>jvmdi</origin>
       <capabilities>
@@ -8150,8 +8125,7 @@
 	    The local variable's type signature, encoded as a
 	    <internallink id="mUTF">modified UTF-8</internallink> string.
 	    The signature format is the same as that defined in
-            <vmspeclink id="ClassFile.doc.html#14152"
-                        name="Field Descriptors section"/>
+	    <vmspec chapter="4.3.2"/>.
 	  </description>
 	</field>
 	<field id="generic_signature">
@@ -10460,10 +10434,7 @@
       <synopsis>Add To Bootstrap Class Loader Search</synopsis>
       <description>
           This function can be used to cause instrumentation classes to be defined by the 
-          bootstrap class loader. See
-          <vmspeclink id="ConstantPool.doc.html#79383"
-                      name="Loading Using the Bootstrap Class Loader"
-                      preposition="in"/>.
+          bootstrap class loader. See <vmspec chapter="5.3.1"/>.
           After the bootstrap
 	  class loader unsuccessfully searches for a class, the specified platform-dependent 
 	  search path <paramlink id="segment"/> will be searched as well. Only one segment may be specified in 
@@ -10480,7 +10451,7 @@
           contain any classes or resources other than those to be defined by the bootstrap
           class loader for the purposes of instrumentation.
           <p/>
-          The <vmspeclink/> specifies that a subsequent attempt to resolve a symbolic
+          <vmspec/> specifies that a subsequent attempt to resolve a symbolic
           reference that the Java virtual machine has previously unsuccessfully attempted
           to resolve always fails with the same error that was thrown as a result of the
           initial resolution attempt. Consequently, if the JAR file contains an entry
@@ -10512,10 +10483,7 @@
       <synopsis>Add To System Class Loader Search</synopsis>
       <description>
 	  This function can be used to cause instrumentation classes to be
-	  defined by the system class loader. See
-          <vmspeclink id="ConstantPool.doc.html#79441"
-                      name="Loading Using a User-defined Class Loader"
-                      preposition="in"/>. 
+	  defined by the system class loader. See <vmspec chapter="5.3.2"/>.
 	  After the class loader unsuccessfully searches for a class, the specified platform-dependent search 
 	  path <paramlink id="segment"/> will be searched as well. Only one segment may be specified in the 
 	  <paramlink id="segment"/>. This function may be called multiple times to add multiple segments, the 
@@ -10536,7 +10504,7 @@
 	  which takes a single parameter of type <code>java.lang.String</code>. The method is not required 
 	  to have <code>public</code> access. 
 	  <p/>
-          The <vmspeclink/> specifies that a subsequent attempt to resolve a symbolic
+          <vmspec/> specifies that a subsequent attempt to resolve a symbolic
           reference that the Java virtual machine has previously unsuccessfully attempted
           to resolve always fails with the same error that was thrown as a result of the
           initial resolution attempt. Consequently, if the JAR file contains an entry
@@ -11438,7 +11406,7 @@
       at the finest granularity allowed by the VM. A single step event is
       generated whenever a thread reaches a new location. 
       Typically, single step events represent the completion of one VM 
-      instruction as defined in the <vmspeclink/>. However, some implementations 
+      instruction as defined in <vmspec/>. However, some implementations 
       may define locations differently. In any case the 
       <code>method</code> and <code>location</code>
       parameters  uniquely identify the current location and allow
@@ -13841,7 +13809,7 @@
       and can_get_source_debug_extension.
       PopFrame cannot have a native calling method.
       Removed incorrect statement in GetClassloaderClasses 
-      (see http://java.sun.com/docs/books/vmspec/2nd-edition/html/ConstantPool.doc.html#79383).
+      (see <vmspec chapter="4.4"/>).
   </change>
   <change date="24 July 2003" version="v79">
       XML and text fixes.
--- a/hotspot/src/share/vm/prims/jvmti.xsl	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/prims/jvmti.xsl	Fri May 06 11:36:25 2011 -0700
@@ -1039,34 +1039,14 @@
   </a>
 </xsl:template>
 
-<xsl:template match="vmspeclink">
-  <xsl:if test="count(@id)=1">
-    <a>
-      <xsl:attribute name="href">
-        <xsl:text>http://java.sun.com/docs/books/vmspec/2nd-edition/html/</xsl:text>
-        <xsl:value-of select="@id"/>
-      </xsl:attribute>
-      <xsl:value-of select="@name"/>
-    </a>
-    <xsl:text> </xsl:text>
-    <xsl:choose>
-      <xsl:when test="count(@preposition)=1">
-        <xsl:value-of select="@preposition"/>
-      </xsl:when>
-      <xsl:otherwise>
-        <xsl:text>of</xsl:text>
-      </xsl:otherwise>
-    </xsl:choose>
-    <xsl:text> the </xsl:text>
-  </xsl:if>
-  <a>
-    <xsl:attribute name="href">
-      <xsl:text>http://java.sun.com/docs/books/vmspec/</xsl:text>
-    </xsl:attribute>
-    <i>
-      <xsl:text>Java Virtual Machine Specification</xsl:text>
-    </i>
-  </a>
+<xsl:template match="vmspec">
+  <cite>
+    <xsl:text>The Java&#8482; Virtual Machine Specification</xsl:text>
+    <xsl:if test="count(@chapter)=1">
+      <xsl:text>, Chapter </xsl:text> 
+      <xsl:value-of select="@chapter"/>
+    </xsl:if>
+  </cite>
 </xsl:template>
 
 <xsl:template match="internallink">
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp	Fri May 06 11:36:25 2011 -0700
@@ -1804,6 +1804,8 @@
 }
 
 void JvmtiExport::post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) {
+  assert(name != NULL && name[0] != '\0', "sanity check");
+
   JavaThread* thread = JavaThread::current();
   // In theory everyone coming thru here is in_vm but we need to be certain
   // because a callee will do a vm->native transition
--- a/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Fri May 06 11:36:25 2011 -0700
@@ -38,6 +38,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/os.hpp"
 #include "runtime/serviceThread.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/vframe.hpp"
@@ -939,10 +940,15 @@
   nmethodLocker::lock_nmethod(nm, true /* zombie_ok */);
   return event;
 }
+
 JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
       const char* name, const void* code_begin, const void* code_end) {
   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
-  event._event_data.dynamic_code_generated.name = name;
+  // Need to make a copy of the name since we don't know how long
+  // the event poster will keep it around after we enqueue the
+  // deferred event and return. strdup() failure is handled in
+  // the post() routine below.
+  event._event_data.dynamic_code_generated.name = os::strdup(name);
   event._event_data.dynamic_code_generated.code_begin = code_begin;
   event._event_data.dynamic_code_generated.code_end = code_end;
   return event;
@@ -968,12 +974,19 @@
       nmethodLocker::unlock_nmethod(nm);
       break;
     }
-    case TYPE_DYNAMIC_CODE_GENERATED:
+    case TYPE_DYNAMIC_CODE_GENERATED: {
       JvmtiExport::post_dynamic_code_generated_internal(
-        _event_data.dynamic_code_generated.name,
+        // if strdup failed give the event a default name
+        (_event_data.dynamic_code_generated.name == NULL)
+          ? "unknown_code" : _event_data.dynamic_code_generated.name,
         _event_data.dynamic_code_generated.code_begin,
         _event_data.dynamic_code_generated.code_end);
+      if (_event_data.dynamic_code_generated.name != NULL) {
+        // release our copy
+        os::free((void *)_event_data.dynamic_code_generated.name);
+      }
       break;
+    }
     default:
       ShouldNotReachHere();
   }
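
Editor's note: the jvmtiImpl.cpp hunk above fixes a lifetime problem. The deferred event used to keep a raw pointer to the caller's name buffer, which may no longer be valid by the time the service thread posts the event; it now owns a copy made with os::strdup(), falls back to a default name if the copy failed, and frees the copy after posting. A minimal sketch of that ownership pattern follows, using the C library strdup/free rather than the os:: wrappers and with hypothetical names:

#include <cstdio>
#include <cstdlib>
#include <cstring>

struct DeferredEvent {
  const char* name;  // owned copy, or NULL if duplication failed
};

// Enqueue-time: take a private copy so the caller may reuse or free its buffer.
DeferredEvent make_event(const char* caller_name) {
  DeferredEvent e;
  e.name = strdup(caller_name);  // may return NULL on allocation failure
  return e;
}

// Post-time: substitute a default name if the copy failed, then release it.
void post_event(DeferredEvent* e) {
  const char* name = (e->name == NULL) ? "unknown_code" : e->name;
  std::printf("posting dynamic code event for %s\n", name);
  if (e->name != NULL) {
    free((void*)e->name);  // release our copy
    e->name = NULL;
  }
}

int main() {
  char caller_buf[32];
  std::strcpy(caller_buf, "generated_stub");
  DeferredEvent e = make_event(caller_buf);
  std::strcpy(caller_buf, "reused");  // caller's buffer can change safely now
  post_event(&e);
  return 0;
}
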
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri May 06 11:36:25 2011 -0700
@@ -960,7 +960,7 @@
   // Ensure Agent_OnLoad has the correct initial values.
   // This may not be the final mode; mode may change later in onload phase.
   PropertyList_unique_add(&_system_properties, "java.vm.info",
-                          (char*)Abstract_VM_Version::vm_info_string(), false);
+                          (char*)VM_Version::vm_info_string(), false);
 
   UseInterpreter             = true;
   UseCompiler                = true;
@@ -1423,6 +1423,11 @@
       }
     }
   }
+  if (UseNUMA) {
+    if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
+      FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
+    }
+  }
 }
 
 void Arguments::set_g1_gc_flags() {
@@ -2379,7 +2384,6 @@
       _gc_log_filename = strdup(tail);
       FLAG_SET_CMDLINE(bool, PrintGC, true);
       FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true);
-      FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
 
     // JNI hooks
     } else if (match_option(option, "-Xcheck", &tail)) {
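
Editor's note: the UseNUMA hunk above follows the usual HotSpot ergonomics pattern of raising a flag's default only when the user has not set it explicitly (FLAG_IS_DEFAULT / FLAG_SET_DEFAULT). A minimal standalone sketch of that "respect explicit user settings" idea, with hypothetical names rather than HotSpot's flag macros:

#include <cstdio>

// A toy flag: a value plus a record of whether the user set it explicitly.
struct SizeFlag {
  long value;
  bool set_by_user;
};

// Ergonomics may only change a flag that is still at its default.
static void set_default_if_unset(SizeFlag* flag, long new_default) {
  if (!flag->set_by_user) {
    flag->value = new_default;
  }
}

int main() {
  const long M = 1024 * 1024;
  SizeFlag min_heap_delta = { 128 * 1024, /*set_by_user=*/ false };

  bool use_numa = true;  // pretend -XX:+UseNUMA was given
  if (use_numa) {
    set_default_if_unset(&min_heap_delta, 64 * M);  // mirrors the hunk above
  }
  std::printf("MinHeapDeltaBytes = %ld\n", min_heap_delta.value);
  return 0;
}
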
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri May 06 11:36:25 2011 -0700
@@ -1827,7 +1827,7 @@
   develop(bool, VerifyBlockOffsetArray, false,                              \
           "Do (expensive!) block offset array verification")                \
                                                                             \
-  product(bool, BlockOffsetArrayUseUnallocatedBlock, false,                 \
+  diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false,              \
           "Maintain _unallocated_block in BlockOffsetArray"                 \
           " (currently applicable only to CMS collector)")                  \
                                                                             \
@@ -2882,7 +2882,7 @@
           "Max. no. of lines in the stack trace for Java exceptions "       \
           "(0 means all)")                                                  \
                                                                             \
-  NOT_EMBEDDED(develop(intx, GuaranteedSafepointInterval, 1000,             \
+  NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000,          \
           "Guarantee a safepoint (at least) every so many milliseconds "    \
           "(0 means none)"))                                                \
                                                                             \
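
Editor's note: both globals.hpp hunks above move flags into the diagnostic category; in product builds a diagnostic flag can only be changed on the command line after -XX:+UnlockDiagnosticVMOptions. A minimal sketch of that gating idea (hypothetical names, not HotSpot's flag table machinery):

#include <cstdio>

// Toy model of a gated option: diagnostic options may only be written
// once the "unlock" switch has been turned on.
struct VMOptions {
  bool unlock_diagnostic;                   // -XX:+UnlockDiagnosticVMOptions
  bool block_offset_array_use_unallocated;  // a diagnostic option
};

static bool set_diagnostic(const VMOptions* opts, bool* field, bool value) {
  if (!opts->unlock_diagnostic) {
    std::fprintf(stderr, "error: diagnostic option is locked\n");
    return false;
  }
  *field = value;
  return true;
}

int main() {
  VMOptions opts = { false, false };
  // Fails: diagnostic options are locked by default in product builds.
  set_diagnostic(&opts, &opts.block_offset_array_use_unallocated, true);
  // Succeeds after unlocking.
  opts.unlock_diagnostic = true;
  set_diagnostic(&opts, &opts.block_offset_array_use_unallocated, true);
  return 0;
}
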
--- a/hotspot/src/share/vm/runtime/os.hpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/runtime/os.hpp	Fri May 06 11:36:25 2011 -0700
@@ -274,7 +274,7 @@
   static char*  reserve_memory_special(size_t size, char* addr = NULL,
                 bool executable = false);
   static bool   release_memory_special(char* addr, size_t bytes);
-  static bool   large_page_init();
+  static void   large_page_init();
   static size_t large_page_size();
   static bool   can_commit_large_page_memory();
   static bool   can_execute_large_page_memory();
--- a/hotspot/src/share/vm/utilities/elfFile.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/utilities/elfFile.cpp	Fri May 06 11:36:25 2011 -0700
@@ -29,6 +29,7 @@
 #include <string.h>
 #include <stdio.h>
 #include <limits.h>
+#include <new>
 
 #include "memory/allocation.inline.hpp"
 #include "utilities/decoder.hpp"
@@ -46,7 +47,7 @@
   m_status = Decoder::no_error;
 
   int len = strlen(filepath) + 1;
-  m_filepath = NEW_C_HEAP_ARRAY(char, len);
+  m_filepath = (const char*)os::malloc(len * sizeof(char));
   if (m_filepath != NULL) {
     strcpy((char*)m_filepath, filepath);
     m_file = fopen(filepath, "r");
@@ -74,7 +75,7 @@
   }
 
   if (m_filepath != NULL) {
-    FREE_C_HEAP_ARRAY(char, m_filepath);
+    os::free((void*)m_filepath);
   }
 
   if (m_next != NULL) {
@@ -120,14 +121,14 @@
       }
       // string table
       if (shdr.sh_type == SHT_STRTAB) {
-        ElfStringTable* table = new ElfStringTable(m_file, shdr, index);
+        ElfStringTable* table = new (std::nothrow) ElfStringTable(m_file, shdr, index);
         if (table == NULL) {
           m_status = Decoder::out_of_memory;
           return false;
         }
         add_string_table(table);
       } else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) {
-        ElfSymbolTable* table = new ElfSymbolTable(m_file, shdr);
+        ElfSymbolTable* table = new (std::nothrow) ElfSymbolTable(m_file, shdr);
         if (table == NULL) {
           m_status = Decoder::out_of_memory;
           return false;
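
Editor's note: the elfFile.cpp hunks above switch the decoder from the C-heap macros to os::malloc/os::free and new (std::nothrow), so an allocation failure can be reported locally as a NULL result (and Decoder::out_of_memory) rather than treated as fatal. A minimal standalone sketch of the nothrow-new plus NULL-check pattern, with hypothetical types rather than the HotSpot classes:

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <new>

// Stand-in for a table object built while parsing a file.
struct StringTable {
  char* data;
  explicit StringTable(size_t size) : data(static_cast<char*>(std::malloc(size))) {}
  ~StringTable() { std::free(data); }
};

int main() {
  // nothrow new: returns NULL instead of throwing std::bad_alloc.
  StringTable* table = new (std::nothrow) StringTable(64);
  if (table == NULL) {
    std::fprintf(stderr, "out of memory\n");  // degrade gracefully
    return 1;
  }
  if (table->data == NULL) {                  // the inner malloc may also fail
    std::fprintf(stderr, "out of memory\n");
    delete table;
    return 1;
  }
  std::strcpy(table->data, "symbols");
  std::printf("%s\n", table->data);
  delete table;
  return 0;
}
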
--- a/hotspot/src/share/vm/utilities/elfStringTable.cpp	Thu May 05 21:06:14 2011 -0700
+++ b/hotspot/src/share/vm/utilities/elfStringTable.cpp	Fri May 06 11:36:25 2011 -0700
@@ -27,6 +27,7 @@
 #ifndef _WINDOWS
 
 #include "memory/allocation.inline.hpp"
+#include "runtime/os.hpp"
 #include "utilities/elfStringTable.hpp"
 
 // We will try to load whole string table into memory if we can.
@@ -41,14 +42,14 @@
 
   // try to load the string table
   long cur_offset = ftell(file);
-  m_table = (char*)NEW_C_HEAP_ARRAY(char, shdr.sh_size);
+  m_table = (char*)os::malloc(sizeof(char) * shdr.sh_size);
   if (m_table != NULL) {
     // if there is an error, mark the error
     if (fseek(file, shdr.sh_offset, SEEK_SET) ||
       fread((void*)m_table, shdr.sh_size, 1, file) != 1 ||
       fseek(file, cur_offset, SEEK_SET)) {
       m_status = Decoder::file_invalid;
-      FREE_C_HEAP_ARRAY(char, m_table);
+      os::free((void*)m_table);
       m_table = NULL;
     }
   } else {
@@ -58,7 +59,7 @@
 
 ElfStringTable::~ElfStringTable() {
   if (m_table != NULL) {
-    FREE_C_HEAP_ARRAY(char, m_table);
+    os::free((void*)m_table);
   }
 
   if (m_next != NULL) {