Merge
author: amurillo
Fri, 08 Mar 2013 08:10:00 -0800
changeset 15878 f06b74113bc2
parent 15845 784b19fdb1af (current diff)
parent 15877 9dc93ed54d35 (diff)
child 15879 25c319ff4cf4
--- a/hotspot/agent/src/os/bsd/MacosxDebuggerLocal.m	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/agent/src/os/bsd/MacosxDebuggerLocal.m	Fri Mar 08 08:10:00 2013 -0800
@@ -160,7 +160,7 @@
   CHECK_EXCEPTION_(0);
 
   unsigned long alignedAddress;
-  unsigned long alignedLength;
+  unsigned long alignedLength = 0;
   kern_return_t result;
   vm_offset_t *pages;
   int *mapped;
@@ -630,7 +630,7 @@
     /* Couldn't find entry point.  error_message should contain some
      * platform dependent error message.
      */
-    THROW_NEW_DEBUGGER_EXCEPTION(error_message);
+    THROW_NEW_DEBUGGER_EXCEPTION_(error_message, (jlong)func);
   }
   return (jlong)func;
 }
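The two hunks above address compiler diagnostics in the Mac OS X serviceability agent: alignedLength now starts at 0 so it can no longer be read uninitialized, and the exception macro switches to the trailing-underscore variant because the enclosing JNI function returns a jlong and therefore needs a value on the throwing path. A minimal sketch of the shape those paired macros usually take in the agent sources (the helper name and exact definitions are assumptions, not quoted from this changeset):

    #include <stdio.h>

    // Stub standing in for the agent's JNI exception helper (assumed name).
    static void throw_new_debugger_exception(void* env, const char* msg) {
      fprintf(stderr, "DebuggerException: %s\n", msg);
    }

    // Plausible shape of the two macro flavors this hunk relies on: the plain
    // form only works in functions returning void, while the '_' form supplies
    // a return value for non-void functions such as the jlong-returning entry.
    #define THROW_NEW_DEBUGGER_EXCEPTION(str)         { throw_new_debugger_exception(env, str); return; }
    #define THROW_NEW_DEBUGGER_EXCEPTION_(str, value) { throw_new_debugger_exception(env, str); return value; }

    long lookup(void* env, const char* sym) {
      void* func = 0;  // pretend the symbol lookup failed
      if (func == 0) {
        THROW_NEW_DEBUGGER_EXCEPTION_("can't find symbol", (long)func);
      }
      return (long)func;
    }
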
--- a/hotspot/agent/src/os/linux/LinuxDebuggerLocal.c	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/agent/src/os/linux/LinuxDebuggerLocal.c	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,13 @@
 #include <jni.h>
 #include "libproc.h"
 
+#include <elf.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <limits.h>
+
 #if defined(x86_64) && !defined(amd64)
 #define amd64 1
 #endif
@@ -154,6 +161,39 @@
   }
 }
 
+
+/*
+ * Verify that a named ELF binary file (core or executable) has the same
+ * bitness as ourselves.
+ * Throw an exception if there is a mismatch or other problem.
+ *
+ * If we proceed using a mismatched debugger/debuggee, the best to hope
+ * for is a missing symbol, the worst is a crash searching for debug symbols.
+ */
+void verifyBitness(JNIEnv *env, const char *binaryName) {
+  int fd = open(binaryName, O_RDONLY);
+  if (fd < 0) {
+    THROW_NEW_DEBUGGER_EXCEPTION("cannot open binary file");
+  }
+  unsigned char elf_ident[EI_NIDENT];
+  int i = read(fd, &elf_ident, sizeof(elf_ident));
+  close(fd);
+
+  if (i < 0) {
+    THROW_NEW_DEBUGGER_EXCEPTION("cannot read binary file");
+  }
+#ifndef _LP64
+  if (elf_ident[EI_CLASS] == ELFCLASS64) {
+    THROW_NEW_DEBUGGER_EXCEPTION("debuggee is 64 bit, use 64-bit java for debugger");
+  }
+#else
+  if (elf_ident[EI_CLASS] != ELFCLASS64) {
+    THROW_NEW_DEBUGGER_EXCEPTION("debuggee is 32 bit, use 32 bit java for debugger");
+  }
+#endif
+}
+
+
 /*
  * Class:     sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal
  * Method:    attach0
@@ -162,6 +202,12 @@
 JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_attach0__I
   (JNIEnv *env, jobject this_obj, jint jpid) {
 
+  // For bitness checking, locate binary at /proc/jpid/exe
+  char buf[PATH_MAX];
+  snprintf((char *) &buf, PATH_MAX, "/proc/%d/exe", jpid);
+  verifyBitness(env, (char *) &buf);
+  CHECK_EXCEPTION;
+
   struct ps_prochandle* ph;
   if ( (ph = Pgrab(jpid)) == NULL) {
     THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the process");
@@ -187,6 +233,9 @@
   coreName_cstr = (*env)->GetStringUTFChars(env, coreName, &isCopy);
   CHECK_EXCEPTION;
 
+  verifyBitness(env, execName_cstr);
+  CHECK_EXCEPTION;
+
   if ( (ph = Pgrab_core(execName_cstr, coreName_cstr)) == NULL) {
     (*env)->ReleaseStringUTFChars(env, execName, execName_cstr);
     (*env)->ReleaseStringUTFChars(env, coreName, coreName_cstr);
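The new verifyBitness() reads just the ELF identification bytes of the target binary (for live attach, /proc/<pid>/exe; for core files, the executable path) and rejects the attach when EI_CLASS does not match the debugger's own word size, since a 32/64-bit mismatch otherwise surfaces later as missing symbols or a crash. A standalone sketch of the same check, slightly stricter in that it also validates the ELF magic and a complete read (names here are illustrative):

    #include <elf.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    // Return 32 or 64 for a valid ELF object, -1 on error or non-ELF input.
    static int elf_bitness(const char* path) {
      unsigned char ident[EI_NIDENT];
      int fd = open(path, O_RDONLY);
      if (fd < 0) return -1;
      ssize_t n = read(fd, ident, sizeof(ident));
      close(fd);
      if (n != (ssize_t) sizeof(ident)) return -1;
      if (memcmp(ident, ELFMAG, SELFMAG) != 0) return -1;   // not an ELF file
      if (ident[EI_CLASS] == ELFCLASS64) return 64;
      if (ident[EI_CLASS] == ELFCLASS32) return 32;
      return -1;
    }

    int main(int argc, char** argv) {
      if (argc < 2) { fprintf(stderr, "usage: %s <elf-file>\n", argv[0]); return 1; }
      printf("%s: %d-bit\n", argv[1], elf_bitness(argv[1]));
      return 0;
    }
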
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/amd64/LinuxAMD64CFrame.java	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/amd64/LinuxAMD64CFrame.java	Fri Mar 08 08:10:00 2013 -0800
@@ -60,8 +60,13 @@
         return null;
       }
 
+      // Check alignment of rbp
+      if ( dbg.getAddressValue(rbp) % ADDRESS_SIZE != 0) {
+        return null;
+      }
+
       Address nextRBP = rbp.getAddressAt( 0 * ADDRESS_SIZE);
-      if (nextRBP == null) {
+      if (nextRBP == null || nextRBP.lessThanOrEqual(rbp)) {
         return null;
       }
       Address nextPC  = rbp.getAddressAt( 1 * ADDRESS_SIZE);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/x86/LinuxX86CFrame.java	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/x86/LinuxX86CFrame.java	Fri Mar 08 08:10:00 2013 -0800
@@ -61,8 +61,13 @@
         return null;
       }
 
+      // Check alignment of ebp
+      if ( dbg.getAddressValue(ebp) % ADDRESS_SIZE != 0) {
+        return null;
+      }
+
       Address nextEBP = ebp.getAddressAt( 0 * ADDRESS_SIZE);
-      if (nextEBP == null) {
+      if (nextEBP == null || nextEBP.lessThanOrEqual(ebp)) {
         return null;
       }
       Address nextPC  = ebp.getAddressAt( 1 * ADDRESS_SIZE);
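Both CFrame walkers (AMD64 above and x86 here) gain the same two guards before following a saved frame pointer: the candidate must be pointer-aligned, and it must be strictly greater than the current frame pointer, because the caller's frame always lives at a higher address on a downward-growing stack; anything else indicates garbage and ends the walk. A toy C++ walk over a simulated stack showing the same guards (the layout and constants are made up for the example; the agent itself reads memory through its Address abstraction):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const size_t ADDRESS_SIZE = sizeof(uintptr_t);
      uintptr_t stack[8] = {0};
      uintptr_t base = reinterpret_cast<uintptr_t>(stack);
      // Frame at slot 0: saved fp -> slot 2, return pc = 0x1111
      // Frame at slot 2: saved fp -> 0 (end of walk), return pc = 0x2222
      stack[0] = base + 2 * ADDRESS_SIZE;  stack[1] = 0x1111;
      stack[2] = 0;                        stack[3] = 0x2222;

      uintptr_t fp = base;
      while (fp != 0) {
        if (fp % ADDRESS_SIZE != 0) break;                        // alignment guard
        uintptr_t next_fp = *reinterpret_cast<uintptr_t*>(fp);                 // [fp + 0]
        uintptr_t ret_pc  = *reinterpret_cast<uintptr_t*>(fp + ADDRESS_SIZE);  // [fp + 1*size]
        printf("frame fp=%p pc=%#lx\n", (void*)fp, (unsigned long)ret_pc);
        if (next_fp == 0 || next_fp <= fp) break;                 // must move up the stack
        fp = next_fp;
      }
      return 0;
    }
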
--- a/hotspot/make/bsd/makefiles/gcc.make	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/make/bsd/makefiles/gcc.make	Fri Mar 08 08:10:00 2013 -0800
@@ -229,6 +229,20 @@
 CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
 endif
 
+ifeq ($(OS_VENDOR), Darwin)
+  # Setting these parameters makes it an error to link to macosx APIs that are 
+  # newer than the given OS version and makes the linked binaries compatible even
+  # if built on a newer version of the OS.
+  # The expected format is X.Y.Z
+  ifeq ($(MACOSX_VERSION_MIN),)
+    MACOSX_VERSION_MIN=10.7.0
+  endif
+  # The macro takes the version with no dots, ex: 1070
+  CFLAGS += -DMAC_OS_X_VERSION_MAX_ALLOWED=$(subst .,,$(MACOSX_VERSION_MIN)) \
+            -mmacosx-version-min=$(MACOSX_VERSION_MIN)
+  LDFLAGS += -mmacosx-version-min=$(MACOSX_VERSION_MIN)
+endif
+
 #------------------------------------------------------------------------
 # Linker flags
 
--- a/hotspot/make/hotspot_version	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/make/hotspot_version	Fri Mar 08 08:10:00 2013 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=21
+HS_BUILD_NUMBER=22
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -2695,7 +2695,7 @@
   assert(thread->is_VM_thread(), "Must be VMThread");
   // read current suspend action
   int action = osthread->sr.suspend_action();
-  if (action == SR_SUSPEND) {
+  if (action == os::Bsd::SuspendResume::SR_SUSPEND) {
     suspend_save_context(osthread, siginfo, context);
 
     // Notify the suspend action is about to be completed. do_suspend()
@@ -2717,12 +2717,12 @@
     do {
       sigsuspend(&suspend_set);
       // ignore all returns until we get a resume signal
-    } while (osthread->sr.suspend_action() != SR_CONTINUE);
+    } while (osthread->sr.suspend_action() != os::Bsd::SuspendResume::SR_CONTINUE);
 
     resume_clear_context(osthread);
 
   } else {
-    assert(action == SR_CONTINUE, "unexpected sr action");
+    assert(action == os::Bsd::SuspendResume::SR_CONTINUE, "unexpected sr action");
     // nothing special to do - just leave the handler
   }
 
@@ -2776,7 +2776,7 @@
 // but this seems the normal response to library errors
 static bool do_suspend(OSThread* osthread) {
   // mark as suspended and send signal
-  osthread->sr.set_suspend_action(SR_SUSPEND);
+  osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_SUSPEND);
   int status = pthread_kill(osthread->pthread_id(), SR_signum);
   assert_status(status == 0, status, "pthread_kill");
 
@@ -2785,18 +2785,18 @@
     for (int i = 0; !osthread->sr.is_suspended(); i++) {
       os::yield_all(i);
     }
-    osthread->sr.set_suspend_action(SR_NONE);
+    osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE);
     return true;
   }
   else {
-    osthread->sr.set_suspend_action(SR_NONE);
+    osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE);
     return false;
   }
 }
 
 static void do_resume(OSThread* osthread) {
   assert(osthread->sr.is_suspended(), "thread should be suspended");
-  osthread->sr.set_suspend_action(SR_CONTINUE);
+  osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_CONTINUE);
 
   int status = pthread_kill(osthread->pthread_id(), SR_signum);
   assert_status(status == 0, status, "pthread_kill");
@@ -2806,7 +2806,7 @@
       os::yield_all(i);
     }
   }
-  osthread->sr.set_suspend_action(SR_NONE);
+  osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE);
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -3903,15 +3903,27 @@
 jlong os::current_thread_cpu_time() {
 #ifdef __APPLE__
   return os::thread_cpu_time(Thread::current(), true /* user + sys */);
+#else
+  Unimplemented();
+  return 0;
 #endif
 }
 
 jlong os::thread_cpu_time(Thread* thread) {
+#ifdef __APPLE__
+  return os::thread_cpu_time(thread, true /* user + sys */);
+#else
+  Unimplemented();
+  return 0;
+#endif
 }
 
 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
 #ifdef __APPLE__
   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
+#else
+  Unimplemented();
+  return 0;
 #endif
 }
 
@@ -3935,6 +3947,9 @@
   } else {
     return ((jlong)tinfo.user_time.seconds * 1000000000) + ((jlong)tinfo.user_time.microseconds * (jlong)1000);
   }
+#else
+  Unimplemented();
+  return 0;
 #endif
 }
 
--- a/hotspot/src/os/bsd/vm/os_bsd.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -151,36 +151,25 @@
   // for BsdThreads are no longer needed.
   class SuspendResume {
   private:
-    volatile int _suspend_action;
+    volatile int  _suspend_action;
+    volatile jint _state;
+  public:
     // values for suspend_action:
-    #define SR_NONE               (0x00)
-    #define SR_SUSPEND            (0x01)  // suspend request
-    #define SR_CONTINUE           (0x02)  // resume request
+    enum {
+      SR_NONE              = 0x00,
+      SR_SUSPEND           = 0x01,  // suspend request
+      SR_CONTINUE          = 0x02,  // resume request
+      SR_SUSPENDED         = 0x20   // values for _state: + SR_NONE
+    };
 
-    volatile jint _state;
-    // values for _state: + SR_NONE
-    #define SR_SUSPENDED          (0x20)
-  public:
     SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; }
 
     int suspend_action() const     { return _suspend_action; }
     void set_suspend_action(int x) { _suspend_action = x;    }
 
     // atomic updates for _state
-    void set_suspended()           {
-      jint temp, temp2;
-      do {
-        temp = _state;
-        temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
-      } while (temp2 != temp);
-    }
-    void clear_suspended()        {
-      jint temp, temp2;
-      do {
-        temp = _state;
-        temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
-      } while (temp2 != temp);
-    }
+    inline void set_suspended();
+    inline void clear_suspended();
     bool is_suspended()            { return _state & SR_SUSPENDED;       }
 
     #undef SR_SUSPENDED
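The SR_* values move from file-scope #defines to an enum nested inside SuspendResume, which is why the os_bsd.cpp hunks above now have to spell them as os::Bsd::SuspendResume::SR_SUSPEND and so on. A small illustration of the scoping difference (simplified class, not the HotSpot declaration):

    // Previously the header did: #define SR_SUSPEND (0x01) etc., so the names
    // leaked into every file that included it and could collide with other code.
    // Scoping them as enumerators confines them to the class:
    class SuspendResume {
     public:
      enum { SR_NONE = 0x00, SR_SUSPEND = 0x01, SR_CONTINUE = 0x02, SR_SUSPENDED = 0x20 };
    };

    bool is_suspend_request(int action) {
      return action == SuspendResume::SR_SUSPEND;   // qualified name, no macro involved
    }
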
--- a/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -25,7 +25,6 @@
 #ifndef OS_BSD_VM_OS_BSD_INLINE_HPP
 #define OS_BSD_VM_OS_BSD_INLINE_HPP
 
-#include "runtime/atomic.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
 
@@ -286,4 +285,21 @@
                             const char* optval, socklen_t optlen) {
   return ::setsockopt(fd, level, optname, optval, optlen);
 }
+
+inline void os::Bsd::SuspendResume::set_suspended()           {
+  jint temp, temp2;
+  do {
+    temp = _state;
+    temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
+  } while (temp2 != temp);
+}
+
+inline void os::Bsd::SuspendResume::clear_suspended()        {
+  jint temp, temp2;
+  do {
+    temp = _state;
+    temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
+  } while (temp2 != temp);
+}
+
 #endif // OS_BSD_VM_OS_BSD_INLINE_HPP
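set_suspended()/clear_suspended() are only moved, not changed: pulling the bodies into the .inline.hpp lets the class header drop its dependency on the atomic headers. The bodies themselves are the standard compare-and-swap retry loop for setting or clearing one bit of a concurrently updated word; the same pattern with std::atomic instead of HotSpot's Atomic::cmpxchg looks like this (a sketch, not the VM code):

    #include <atomic>
    #include <cstdint>

    static const uint32_t SR_SUSPENDED = 0x20;
    static std::atomic<uint32_t> state(0);

    // Set the SR_SUSPENDED bit without losing concurrent updates to other bits.
    void set_suspended() {
      uint32_t old_val = state.load();
      // compare_exchange_weak reloads old_val on failure, so the loop retries
      // with the freshly observed value, exactly like the cmpxchg loop above.
      while (!state.compare_exchange_weak(old_val, old_val | SR_SUSPENDED)) { }
    }

    void clear_suspended() {
      uint32_t old_val = state.load();
      while (!state.compare_exchange_weak(old_val, old_val & ~SR_SUSPENDED)) { }
    }
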
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -3461,7 +3461,7 @@
   assert(thread->is_VM_thread(), "Must be VMThread");
   // read current suspend action
   int action = osthread->sr.suspend_action();
-  if (action == SR_SUSPEND) {
+  if (action == os::Linux::SuspendResume::SR_SUSPEND) {
     suspend_save_context(osthread, siginfo, context);
 
     // Notify the suspend action is about to be completed. do_suspend()
@@ -3483,12 +3483,12 @@
     do {
       sigsuspend(&suspend_set);
       // ignore all returns until we get a resume signal
-    } while (osthread->sr.suspend_action() != SR_CONTINUE);
+    } while (osthread->sr.suspend_action() != os::Linux::SuspendResume::SR_CONTINUE);
 
     resume_clear_context(osthread);
 
   } else {
-    assert(action == SR_CONTINUE, "unexpected sr action");
+    assert(action == os::Linux::SuspendResume::SR_CONTINUE, "unexpected sr action");
     // nothing special to do - just leave the handler
   }
 
@@ -3542,7 +3542,7 @@
 // but this seems the normal response to library errors
 static bool do_suspend(OSThread* osthread) {
   // mark as suspended and send signal
-  osthread->sr.set_suspend_action(SR_SUSPEND);
+  osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_SUSPEND);
   int status = pthread_kill(osthread->pthread_id(), SR_signum);
   assert_status(status == 0, status, "pthread_kill");
 
@@ -3551,18 +3551,18 @@
     for (int i = 0; !osthread->sr.is_suspended(); i++) {
       os::yield_all(i);
     }
-    osthread->sr.set_suspend_action(SR_NONE);
+    osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
     return true;
   }
   else {
-    osthread->sr.set_suspend_action(SR_NONE);
+    osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
     return false;
   }
 }
 
 static void do_resume(OSThread* osthread) {
   assert(osthread->sr.is_suspended(), "thread should be suspended");
-  osthread->sr.set_suspend_action(SR_CONTINUE);
+  osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_CONTINUE);
 
   int status = pthread_kill(osthread->pthread_id(), SR_signum);
   assert_status(status == 0, status, "pthread_kill");
@@ -3572,7 +3572,7 @@
       os::yield_all(i);
     }
   }
-  osthread->sr.set_suspend_action(SR_NONE);
+  osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
 }
 
 ////////////////////////////////////////////////////////////////////////////////
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -209,39 +209,27 @@
   // for LinuxThreads are no longer needed.
   class SuspendResume {
   private:
-    volatile int _suspend_action;
+    volatile int  _suspend_action;
+    volatile jint _state;
+  public:
     // values for suspend_action:
-    #define SR_NONE               (0x00)
-    #define SR_SUSPEND            (0x01)  // suspend request
-    #define SR_CONTINUE           (0x02)  // resume request
+    enum {
+      SR_NONE              = 0x00,
+      SR_SUSPEND           = 0x01,  // suspend request
+      SR_CONTINUE          = 0x02,  // resume request
+      SR_SUSPENDED         = 0x20   // values for _state: + SR_NONE
+    };
 
-    volatile jint _state;
-    // values for _state: + SR_NONE
-    #define SR_SUSPENDED          (0x20)
-  public:
     SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; }
 
     int suspend_action() const     { return _suspend_action; }
     void set_suspend_action(int x) { _suspend_action = x;    }
 
     // atomic updates for _state
-    void set_suspended()           {
-      jint temp, temp2;
-      do {
-        temp = _state;
-        temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
-      } while (temp2 != temp);
-    }
-    void clear_suspended()        {
-      jint temp, temp2;
-      do {
-        temp = _state;
-        temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
-      } while (temp2 != temp);
-    }
+    inline void set_suspended();
+    inline void clear_suspended();
     bool is_suspended()            { return _state & SR_SUSPENDED;       }
 
-    #undef SR_SUSPENDED
   };
 
 private:
--- a/hotspot/src/os/linux/vm/os_linux.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -25,7 +25,6 @@
 #ifndef OS_LINUX_VM_OS_LINUX_INLINE_HPP
 #define OS_LINUX_VM_OS_LINUX_INLINE_HPP
 
-#include "runtime/atomic.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
 
@@ -288,4 +287,21 @@
                             const char* optval, socklen_t optlen) {
   return ::setsockopt(fd, level, optname, optval, optlen);
 }
+
+inline void os::Linux::SuspendResume::set_suspended() {
+  jint temp, temp2;
+  do {
+    temp = _state;
+    temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
+  } while (temp2 != temp);
+}
+
+inline void os::Linux::SuspendResume::clear_suspended()        {
+  jint temp, temp2;
+  do {
+    temp = _state;
+    temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
+  } while (temp2 != temp);
+}
+
 #endif // OS_LINUX_VM_OS_LINUX_INLINE_HPP
--- a/hotspot/src/os/solaris/vm/os_solaris.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os/solaris/vm/os_solaris.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -25,7 +25,6 @@
 #ifndef OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
 #define OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
 
-#include "runtime/atomic.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
 
--- a/hotspot/src/os/windows/vm/decoder_windows.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os/windows/vm/decoder_windows.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "prims/jvm.h"
+#include "runtime/arguments.hpp"
 #include "decoder_windows.hpp"
 
 WindowsDecoder::WindowsDecoder() {
--- a/hotspot/src/os/windows/vm/os_windows.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os/windows/vm/os_windows.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -25,7 +25,6 @@
 #ifndef OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
 #define OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
 
-#include "runtime/atomic.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
 
--- a/hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_INLINE_HPP
 #define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_INLINE_HPP
 
-#include "orderAccess_bsd_x86.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_x86.hpp"
--- a/hotspot/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,9 @@
 #ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
 #define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
 
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
 #include "vm_version_x86.hpp"
 
 // Implementation of class OrderAccess.
--- a/hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,7 +26,6 @@
 #ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
 #define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
 
-#include "orderAccess_bsd_zero.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_zero.hpp"
--- a/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
 #define OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
 
-#include "orderAccess_linux_sparc.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_sparc.hpp"
--- a/hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_INLINE_HPP
 #define OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_INLINE_HPP
 
-#include "orderAccess_linux_x86.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_x86.hpp"
--- a/hotspot/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,9 @@
 #ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
 #define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
 
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
 #include "vm_version_x86.hpp"
 
 // Implementation of class OrderAccess.
--- a/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,7 +26,6 @@
 #ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
 #define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
 
-#include "orderAccess_linux_zero.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_zero.hpp"
--- a/hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_INLINE_HPP
 #define OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_INLINE_HPP
 
-#include "orderAccess_solaris_sparc.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_sparc.hpp"
--- a/hotspot/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
 #define OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
 
+#include "runtime/atomic.inline.hpp"
 #include "runtime/orderAccess.hpp"
 #include "vm_version_sparc.hpp"
 
--- a/hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_INLINE_HPP
 #define OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_INLINE_HPP
 
-#include "orderAccess_solaris_x86.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_x86.hpp"
--- a/hotspot/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
 #define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
 
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_x86.hpp"
--- a/hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_INLINE_HPP
 #define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_INLINE_HPP
 
-#include "orderAccess_windows_x86.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_x86.hpp"
--- a/hotspot/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,12 +25,11 @@
 #ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
 #define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
 
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
 #include "vm_version_x86.hpp"
 
-#pragma warning(disable: 4035) // Disables warnings reporting missing return statement
-
 // Implementation of class OrderAccess.
 
 inline void OrderAccess::loadload()   { acquire(); }
@@ -214,6 +213,4 @@
 #endif // AMD64
 }
 
-#pragma warning(default: 4035) // Enables warnings reporting missing return statement
-
 #endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
--- a/hotspot/src/share/vm/c1/c1_FrameMap.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/c1/c1_FrameMap.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -308,27 +308,6 @@
   return sp_offset_for_monitor_base(index) + in_ByteSize(BasicObjectLock::obj_offset_in_bytes());
 }
 
-void FrameMap::print_frame_layout() const {
-  int svar;
-  tty->print_cr("#####################################");
-  tty->print_cr("Frame size in words %d", framesize());
-
-  if( _num_monitors > 0) {
-    tty->print_cr("monitor [0]:%d | [%2d]:%d",
-                  in_bytes(sp_offset_for_monitor_base(0)),
-                  in_bytes(sp_offset_for_monitor_base(_num_monitors)));
-  }
-  if( _num_spills > 0) {
-    svar = _num_spills - 1;
-    if(svar == 0)
-      tty->print_cr("spill   [0]:%d", in_bytes(sp_offset_for_spill(0)));
-    else
-      tty->print_cr("spill   [0]:%d | [%2d]:%d", in_bytes(sp_offset_for_spill(0)),
-                    svar,
-                    in_bytes(sp_offset_for_spill(svar)));
-  }
-}
-
 
 // For OopMaps, map a local variable or spill index to an VMReg.
 // This is the offset from sp() in the frame of the slot for the index,
--- a/hotspot/src/share/vm/c1/c1_FrameMap.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/c1/c1_FrameMap.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -226,8 +226,6 @@
     return make_new_address(sp_offset_for_monitor_object(monitor_index));
   }
 
-  void print_frame_layout() const;
-
   // Creates Location describing desired slot and returns it via pointer
   // to Location object. Returns true if the stack frame offset was legal
   // (as defined by Location::legal_offset_in_bytes()), false otherwise.
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -234,6 +234,7 @@
   void add_to_deallocate_list(Metadata* m);
 
   static ClassLoaderData* class_loader_data(oop loader);
+  static ClassLoaderData* class_loader_data_or_null(oop loader);
   static ClassLoaderData* anonymous_class_loader_data(oop loader, TRAPS);
   static void print_loader(ClassLoaderData *loader_data, outputStream *out);
 
--- a/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -25,9 +25,15 @@
 #include "classfile/classLoaderData.hpp"
 #include "classfile/javaClasses.hpp"
 
+inline ClassLoaderData* ClassLoaderData::class_loader_data_or_null(oop loader) {
+  if (loader == NULL) {
+    return ClassLoaderData::the_null_class_loader_data();
+  }
+  return java_lang_ClassLoader::loader_data(loader);
+}
+
 inline ClassLoaderData* ClassLoaderData::class_loader_data(oop loader) {
-  if (loader == NULL) return ClassLoaderData::the_null_class_loader_data();
-  ClassLoaderData* loader_data = java_lang_ClassLoader::loader_data(loader);
+  ClassLoaderData* loader_data = class_loader_data_or_null(loader);
   assert(loader_data != NULL, "Must be");
   return loader_data;
 }
--- a/hotspot/src/share/vm/classfile/dictionary.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -347,6 +347,7 @@
   assert_locked_or_safepoint(SystemDictionary_lock);
   assert(obj() != NULL, "adding NULL obj");
   assert(obj()->name() == class_name, "sanity check on name");
+  assert(loader_data != NULL, "Must be non-NULL");
 
   unsigned int hash = compute_hash(class_name, loader_data);
   int index = hash_to_index(hash);
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -866,16 +866,22 @@
 // the new entry.
 
 Klass* SystemDictionary::find(Symbol* class_name,
-                                Handle class_loader,
-                                Handle protection_domain,
-                                TRAPS) {
+                              Handle class_loader,
+                              Handle protection_domain,
+                              TRAPS) {
 
   // UseNewReflection
   // The result of this call should be consistent with the result
   // of the call to resolve_instance_class_or_null().
   // See evaluation 6790209 and 4474172 for more details.
   class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
-  ClassLoaderData* loader_data = register_loader(class_loader, CHECK_NULL);
+  ClassLoaderData* loader_data = ClassLoaderData::class_loader_data_or_null(class_loader());
+
+  if (loader_data == NULL) {
+    // If the ClassLoaderData has not been setup,
+    // then the class loader has no entries in the dictionary.
+    return NULL;
+  }
 
   unsigned int d_hash = dictionary()->compute_hash(class_name, loader_data);
   int d_index = dictionary()->hash_to_index(d_hash);
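The point of the change to SystemDictionary::find() is that a pure lookup should not create state: register_loader() would allocate and register a ClassLoaderData for a loader that has never loaded anything, whereas class_loader_data_or_null() simply reports that no such data exists yet, which the dictionary can translate directly into "class not found". The same design concern in miniature, using a plain map rather than the VM's dictionary (illustrative only):

    #include <map>
    #include <string>

    struct Registry {
      std::map<std::string, int> table;

      // Side-effecting lookup: operator[] inserts a default entry when the key
      // is missing, so merely asking a question mutates the registry.
      int find_with_side_effect(const std::string& key) { return table[key]; }

      // Pure lookup: absent keys leave the container untouched.
      const int* find(const std::string& key) const {
        auto it = table.find(key);
        return it == table.end() ? nullptr : &it->second;
      }
    };
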
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -300,8 +300,7 @@
   }
 }
 
-// Wait until the next synchronous GC, a concurrent full gc request,
-// or a timeout, whichever is earlier.
+// Wait until any cms_lock event
 void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
   MutexLockerEx x(CGC_lock,
                   Mutex::_no_safepoint_check_flag);
@@ -315,15 +314,100 @@
          "Should not be set");
 }
 
+// Wait until the next synchronous GC, a concurrent full gc request,
+// or a timeout, whichever is earlier.
+void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
+  // Wait time in millis or 0 value representing infinite wait for a scavenge
+  assert(t_millis >= 0, "Wait time for scavenge should be 0 or positive");
+
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  double start_time_secs = os::elapsedTime();
+  double end_time_secs = start_time_secs + (t_millis / ((double) MILLIUNITS));
+
+  // Total collections count before waiting loop
+  unsigned int before_count;
+  {
+    MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
+    before_count = gch->total_collections();
+  }
+
+  unsigned int loop_count = 0;
+
+  while(!_should_terminate) {
+    double now_time = os::elapsedTime();
+    long wait_time_millis;
+
+    if(t_millis != 0) {
+      // New wait limit
+      wait_time_millis = (long) ((end_time_secs - now_time) * MILLIUNITS);
+      if(wait_time_millis <= 0) {
+        // Wait time is over
+        break;
+      }
+    } else {
+      // No wait limit, wait if necessary forever
+      wait_time_millis = 0;
+    }
+
+    // Wait until the next event or the remaining timeout
+    {
+      MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
+
+      if (_should_terminate || _collector->_full_gc_requested) {
+        return;
+      }
+      set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
+      assert(t_millis == 0 || wait_time_millis > 0, "Sanity");
+      CGC_lock->wait(Mutex::_no_safepoint_check_flag, wait_time_millis);
+      clear_CMS_flag(CMS_cms_wants_token);
+      assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
+             "Should not be set");
+    }
+
+    // Extra wait time check before entering the heap lock to get the collection count
+    if(t_millis != 0 && os::elapsedTime() >= end_time_secs) {
+      // Wait time is over
+      break;
+    }
+
+    // Total collections count after the event
+    unsigned int after_count;
+    {
+      MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
+      after_count = gch->total_collections();
+    }
+
+    if(before_count != after_count) {
+      // There was a collection - success
+      break;
+    }
+
+    // Too many loops warning
+    if(++loop_count == 0) {
+      warning("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1);
+    }
+  }
+}
+
 void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
   while (!_should_terminate) {
     if (CMSIncrementalMode) {
       icms_wait();
+      if(CMSWaitDuration >= 0) {
+        // Wait until the next synchronous GC, a concurrent full gc
+        // request or a timeout, whichever is earlier.
+        wait_on_cms_lock_for_scavenge(CMSWaitDuration);
+      }
       return;
     } else {
-      // Wait until the next synchronous GC, a concurrent full gc
-      // request or a timeout, whichever is earlier.
-      wait_on_cms_lock(CMSWaitDuration);
+      if(CMSWaitDuration >= 0) {
+        // Wait until the next synchronous GC, a concurrent full gc
+        // request or a timeout, whichever is earlier.
+        wait_on_cms_lock_for_scavenge(CMSWaitDuration);
+      } else {
+        // Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently
+        wait_on_cms_lock(CMSCheckInterval);
+      }
     }
     // Check if we should start a CMS collection cycle
     if (_collector->shouldConcurrentCollect()) {
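wait_on_cms_lock_for_scavenge() is a deadline-bounded wait: it samples total_collections() under the Heap_lock, then repeatedly waits on CGC_lock for at most the remaining time, leaving early on termination, on a concurrent full-gc request, or once the collection count has changed (a scavenge happened); a timeout of 0 means no upper bound. The same "wait until a counter changes or the deadline passes" shape with standard primitives, as a sketch only (std::condition_variable stands in for CGC_lock, the plain counter for total_collections()):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex              mtx;
    std::condition_variable cv;
    unsigned collection_count = 0;  // a real collector would ++ this and notify under mtx

    // Returns true if a collection happened before the deadline, false on timeout.
    // timeout_millis == 0 means wait with no upper bound, as in the CMS code.
    bool wait_for_collection(long timeout_millis) {
      std::unique_lock<std::mutex> lock(mtx);
      const unsigned before = collection_count;
      auto changed = [&] { return collection_count != before; };
      if (timeout_millis == 0) {
        cv.wait(lock, changed);
        return true;
      }
      return cv.wait_for(lock, std::chrono::milliseconds(timeout_millis), changed);
    }
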
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -130,6 +130,12 @@
   // A concurrent full gc request terminates the wait.
   void wait_on_cms_lock(long t_millis);
 
+  // Wait on CMS lock until the next synchronous GC
+  // or given timeout, whichever is earlier. A timeout value
+  // of 0 indicates that there is no upper bound on the wait time.
+  // A concurrent full gc request terminates the wait.
+  void wait_on_cms_lock_for_scavenge(long t_millis);
+
   // The CMS thread will yield during the work portion of its cycle
   // only when requested to.  Both synchronous and asychronous requests
   // are provided:
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -146,43 +146,6 @@
   verify();
 }
 
-uint CollectionSetChooser::calc_min_old_cset_length() {
-  // The min old CSet region bound is based on the maximum desired
-  // number of mixed GCs after a cycle. I.e., even if some old regions
-  // look expensive, we should add them to the CSet anyway to make
-  // sure we go through the available old regions in no more than the
-  // maximum desired number of mixed GCs.
-  //
-  // The calculation is based on the number of marked regions we added
-  // to the CSet chooser in the first place, not how many remain, so
-  // that the result is the same during all mixed GCs that follow a cycle.
-
-  const size_t region_num = (size_t) _length;
-  const size_t gc_num = (size_t) G1MixedGCCountTarget;
-  size_t result = region_num / gc_num;
-  // emulate ceiling
-  if (result * gc_num < region_num) {
-    result += 1;
-  }
-  return (uint) result;
-}
-
-uint CollectionSetChooser::calc_max_old_cset_length() {
-  // The max old CSet region bound is based on the threshold expressed
-  // as a percentage of the heap size. I.e., it should bound the
-  // number of old regions added to the CSet irrespective of how many
-  // of them are available.
-
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  const size_t region_num = g1h->n_regions();
-  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
-  size_t result = region_num * perc / 100;
-  // emulate ceiling
-  if (100 * result < region_num * perc) {
-    result += 1;
-  }
-  return (uint) result;
-}
 
 void CollectionSetChooser::add_region(HeapRegion* hr) {
   assert(!hr->isHumongous(),
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -51,6 +51,8 @@
   uint _curr_index;
 
   // The number of candidate old regions added to the CSet chooser.
+  // Note: this is not updated when removing a region using
+  // remove_and_move_to_next() below.
   uint _length;
 
   // Keeps track of the start of the next array chunk to be claimed by
@@ -111,13 +113,8 @@
             hr->live_bytes() < _region_live_threshold_bytes;
   }
 
-  // Calculate the minimum number of old regions we'll add to the CSet
-  // during a mixed GC.
-  uint calc_min_old_cset_length();
-
-  // Calculate the maximum number of old regions we'll add to the CSet
-  // during a mixed GC.
-  uint calc_max_old_cset_length();
+  // Returns the number candidate old regions added
+  uint length() { return _length; }
 
   // Serial version.
   void add_region(HeapRegion *hr);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1806,6 +1806,14 @@
 }
 #endif // !PRODUCT
 
+double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) {
+  // Returns the given amount of reclaimable bytes (that represents
+  // the amount of reclaimable space still to be collected) as a
+  // percentage of the current heap capacity.
+  size_t capacity_bytes = _g1->capacity();
+  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
+}
+
 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                 const char* false_action_str) {
   CollectionSetChooser* cset_chooser = _collectionSetChooser;
@@ -1815,19 +1823,21 @@
                   ergo_format_reason("candidate old regions not available"));
     return false;
   }
+
+  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
   size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
-  size_t capacity_bytes = _g1->capacity();
-  double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
+  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
   double threshold = (double) G1HeapWastePercent;
-  if (perc < threshold) {
+  if (reclaimable_perc <= threshold) {
     ergo_verbose4(ErgoMixedGCs,
               false_action_str,
-              ergo_format_reason("reclaimable percentage lower than threshold")
+              ergo_format_reason("reclaimable percentage not over threshold")
               ergo_format_region("candidate old regions")
               ergo_format_byte_perc("reclaimable")
               ergo_format_perc("threshold"),
               cset_chooser->remaining_regions(),
-              reclaimable_bytes, perc, threshold);
+              reclaimable_bytes,
+              reclaimable_perc, threshold);
     return false;
   }
 
@@ -1838,10 +1848,50 @@
                 ergo_format_byte_perc("reclaimable")
                 ergo_format_perc("threshold"),
                 cset_chooser->remaining_regions(),
-                reclaimable_bytes, perc, threshold);
+                reclaimable_bytes,
+                reclaimable_perc, threshold);
   return true;
 }
 
+uint G1CollectorPolicy::calc_min_old_cset_length() {
+  // The min old CSet region bound is based on the maximum desired
+  // number of mixed GCs after a cycle. I.e., even if some old regions
+  // look expensive, we should add them to the CSet anyway to make
+  // sure we go through the available old regions in no more than the
+  // maximum desired number of mixed GCs.
+  //
+  // The calculation is based on the number of marked regions we added
+  // to the CSet chooser in the first place, not how many remain, so
+  // that the result is the same during all mixed GCs that follow a cycle.
+
+  const size_t region_num = (size_t) _collectionSetChooser->length();
+  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
+  size_t result = region_num / gc_num;
+  // emulate ceiling
+  if (result * gc_num < region_num) {
+    result += 1;
+  }
+  return (uint) result;
+}
+
+uint G1CollectorPolicy::calc_max_old_cset_length() {
+  // The max old CSet region bound is based on the threshold expressed
+  // as a percentage of the heap size. I.e., it should bound the
+  // number of old regions added to the CSet irrespective of how many
+  // of them are available.
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  const size_t region_num = g1h->n_regions();
+  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
+  size_t result = region_num * perc / 100;
+  // emulate ceiling
+  if (100 * result < region_num * perc) {
+    result += 1;
+  }
+  return (uint) result;
+}
+
+
 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   double young_start_time_sec = os::elapsedTime();
 
@@ -1855,7 +1905,7 @@
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
   double predicted_pause_time_ms = base_time_ms;
-  double time_remaining_ms = target_pause_time_ms - base_time_ms;
+  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
 
   ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
                 "start choosing CSet",
@@ -1893,7 +1943,7 @@
 
   _collection_set = _inc_cset_head;
   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
-  time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
+  time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
 
   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
@@ -1917,8 +1967,8 @@
   if (!gcs_are_young()) {
     CollectionSetChooser* cset_chooser = _collectionSetChooser;
     cset_chooser->verify();
-    const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length();
-    const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length();
+    const uint min_old_cset_length = calc_min_old_cset_length();
+    const uint max_old_cset_length = calc_max_old_cset_length();
 
     uint expensive_region_num = 0;
     bool check_time_remaining = adaptive_young_list_length();
@@ -1936,6 +1986,30 @@
         break;
       }
 
+
+      // Stop adding regions if the remaining reclaimable space is
+      // not above G1HeapWastePercent.
+      size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
+      double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
+      double threshold = (double) G1HeapWastePercent;
+      if (reclaimable_perc <= threshold) {
+        // We've added enough old regions that the amount of uncollected
+        // reclaimable space is at or below the waste threshold. Stop
+        // adding old regions to the CSet.
+        ergo_verbose5(ErgoCSetConstruction,
+                      "finish adding old regions to CSet",
+                      ergo_format_reason("reclaimable percentage not over threshold")
+                      ergo_format_region("old")
+                      ergo_format_region("max")
+                      ergo_format_byte_perc("reclaimable")
+                      ergo_format_perc("threshold"),
+                      old_cset_region_length(),
+                      max_old_cset_length,
+                      reclaimable_bytes,
+                      reclaimable_perc, threshold);
+        break;
+      }
+
       double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
       if (check_time_remaining) {
         if (predicted_time_ms > time_remaining_ms) {
@@ -1975,7 +2049,7 @@
       }
 
       // We will add this region to the CSet.
-      time_remaining_ms -= predicted_time_ms;
+      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
       predicted_pause_time_ms += predicted_time_ms;
       cset_chooser->remove_and_move_to_next(hr);
       _g1->old_set_remove(hr);
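calc_min_old_cset_length() and calc_max_old_cset_length(), moved from CollectionSetChooser into G1CollectorPolicy, both round up with integer arithmetic: divide, then add one if the product of quotient and divisor fell short; the minimum-length calculation also clamps the divisor to at least 1 so a G1MixedGCCountTarget of 0 cannot divide by zero. For instance, 10 candidate regions with a target of 4 mixed GCs gives 10 / 4 = 2, and since 2 * 4 < 10 the minimum becomes 3 regions per mixed GC. The idiom in isolation:

    #include <cassert>
    #include <cstddef>

    // Integer ceiling of a / b for b > 0, written the same way as the
    // "emulate ceiling" code in the policy above.
    static size_t ceil_div(size_t a, size_t b) {
      size_t result = a / b;
      if (result * b < a) {
        result += 1;  // there was a remainder, round up
      }
      return result;
    }

    int main() {
      assert(ceil_div(10, 4) == 3);  // 10 regions over a 4-GC target
      assert(ceil_div(8, 4) == 2);   // exact division needs no adjustment
      return 0;
    }
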
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -619,6 +619,18 @@
   bool predict_will_fit(uint young_length, double base_time_ms,
                         uint base_free_regions, double target_pause_time_ms);
 
+  // Calculate the minimum number of old regions we'll add to the CSet
+  // during a mixed GC.
+  uint calc_min_old_cset_length();
+
+  // Calculate the maximum number of old regions we'll add to the CSet
+  // during a mixed GC.
+  uint calc_max_old_cset_length();
+
+  // Returns the given amount of uncollected reclaimable space
+  // as a percentage of the current heap capacity.
+  double reclaimable_bytes_perc(size_t reclaimable_bytes);
+
 public:
 
   G1CollectorPolicy();
--- a/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -373,6 +373,8 @@
                          " does not exceed used.end() = " PTR_FORMAT ","
                          " yet last_chunk_index_to_check " INTPTR_FORMAT
                          " exceeds last_chunk_index " INTPTR_FORMAT,
+                         last_block, last_block + last_block_size,
+                         used.end(),
                          last_chunk_index_to_check, last_chunk_index));
           assert(sp->used_region().end() > used.end(),
                  err_msg("Expansion did not happen: "
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
 #define SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
 
+#include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
 
 // Explicit C-heap memory management
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -694,7 +694,7 @@
     if (failed) {
       if (!failures) {
         tty->cr();
-        tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]");
+        tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]", start, end);
         tty->print_cr("==   %sexpecting value: %d",
                       (val_equals) ? "" : "not ", val);
         failures = true;
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -353,7 +353,7 @@
     assert(jp >= _begin && jp < _end,
            err_msg("Error: jp " PTR_FORMAT " should be within "
                    "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")",
-                   _begin, _end));
+                   jp, _begin, _end));
     oop obj = oopDesc::load_decode_heap_oop(p);
     guarantee(obj == NULL || (HeapWord*)obj >= _boundary,
               err_msg("pointer " PTR_FORMAT " at " PTR_FORMAT " on "
--- a/hotspot/src/share/vm/memory/metaspaceCounters.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/memory/metaspaceCounters.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -25,12 +25,14 @@
 #include "precompiled.hpp"
 #include "memory/metaspaceCounters.hpp"
 #include "memory/resourceArea.hpp"
-
-#define METASPACE_NAME "perm"
+#include "utilities/exceptions.hpp"
 
 MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL;
 
-MetaspaceCounters::MetaspaceCounters() {
+MetaspaceCounters::MetaspaceCounters() :
+    _capacity(NULL),
+    _used(NULL),
+    _max_capacity(NULL) {
   if (UsePerfData) {
     size_t min_capacity = MetaspaceAux::min_chunk_size();
     size_t max_capacity = MetaspaceAux::reserved_in_bytes();
@@ -41,6 +43,25 @@
   }
 }
 
+static PerfVariable* create_ms_variable(const char *ns,
+                                        const char *name,
+                                        size_t value,
+                                        TRAPS) {
+  const char *path = PerfDataManager::counter_name(ns, name);
+  PerfVariable *result =
+      PerfDataManager::create_variable(SUN_GC, path, PerfData::U_Bytes, value,
+                                       CHECK_NULL);
+  return result;
+}
+
+static void create_ms_constant(const char *ns,
+                               const char *name,
+                               size_t value,
+                               TRAPS) {
+  const char *path = PerfDataManager::counter_name(ns, name);
+  PerfDataManager::create_constant(SUN_GC, path, PerfData::U_Bytes, value, CHECK);
+}
+
 void MetaspaceCounters::initialize(size_t min_capacity,
                                    size_t max_capacity,
                                    size_t curr_capacity,
@@ -50,93 +71,32 @@
     EXCEPTION_MARK;
     ResourceMark rm;
 
-    // Create a name that will be recognized by jstat tools as
-    // the perm gen.  Change this to a Metaspace name when the
-    // tools are fixed.
-    // name to recognize "sun.gc.generation.2.*"
-
-    const char* name = METASPACE_NAME;
-    const int ordinal = 2;
-    const int spaces = 1;
-
-    const char* cns = PerfDataManager::name_space("generation", ordinal);
-
-    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtClass);
-    strcpy(_name_space, cns);
-
-    const char* cname = PerfDataManager::counter_name(_name_space, "name");
-    PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK);
-
-    // End of perm gen like name creation
-
-    cname = PerfDataManager::counter_name(_name_space, "spaces");
-    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
-                                     spaces, CHECK);
-
-    cname = PerfDataManager::counter_name(_name_space, "minCapacity");
-    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
-                                     min_capacity, CHECK);
-
-    cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
-    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
-                                     max_capacity, CHECK);
+    const char *ms = "metaspace";
 
-    cname = PerfDataManager::counter_name(_name_space, "capacity");
-    _current_size =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-                                       curr_capacity, CHECK);
-
-    // SpaceCounter like counters
-    // name to recognize "sun.gc.generation.2.space.0.*"
-    {
-      const int space_ordinal = 0;
-      const char* cns = PerfDataManager::name_space(_name_space, "space",
-                                                    space_ordinal);
-
-      char* space_name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtClass);
-      strcpy(space_name_space, cns);
-
-      const char* cname = PerfDataManager::counter_name(space_name_space, "name");
-      PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK);
-
-      cname = PerfDataManager::counter_name(space_name_space, "maxCapacity");
-      _max_capacity = PerfDataManager::create_variable(SUN_GC, cname,
-                                                       PerfData::U_Bytes,
-                                                       (jlong)max_capacity, CHECK);
-
-      cname = PerfDataManager::counter_name(space_name_space, "capacity");
-      _capacity = PerfDataManager::create_variable(SUN_GC, cname,
-                                                   PerfData::U_Bytes,
-                                                   curr_capacity, CHECK);
-
-      cname = PerfDataManager::counter_name(space_name_space, "used");
-      _used = PerfDataManager::create_variable(SUN_GC,
-                                               cname,
-                                               PerfData::U_Bytes,
-                                               used,
-                                               CHECK);
-
-    cname = PerfDataManager::counter_name(space_name_space, "initCapacity");
-    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
-                                     min_capacity, CHECK);
-    }
+    create_ms_constant(ms, "minCapacity", min_capacity, CHECK);
+    _max_capacity = create_ms_variable(ms, "maxCapacity", max_capacity, CHECK);
+    _capacity = create_ms_variable(ms, "capacity", curr_capacity, CHECK);
+    _used = create_ms_variable(ms, "used", used, CHECK);
   }
 }
 
 void MetaspaceCounters::update_capacity() {
   assert(UsePerfData, "Should not be called unless being used");
+  assert(_capacity != NULL, "Should be initialized");
   size_t capacity_in_bytes = MetaspaceAux::capacity_in_bytes();
   _capacity->set_value(capacity_in_bytes);
 }
 
 void MetaspaceCounters::update_used() {
   assert(UsePerfData, "Should not be called unless being used");
+  assert(_used != NULL, "Should be initialized");
   size_t used_in_bytes = MetaspaceAux::used_in_bytes();
   _used->set_value(used_in_bytes);
 }
 
 void MetaspaceCounters::update_max_capacity() {
   assert(UsePerfData, "Should not be called unless being used");
+  assert(_max_capacity != NULL, "Should be initialized");
   size_t reserved_in_bytes = MetaspaceAux::reserved_in_bytes();
   _max_capacity->set_value(reserved_in_bytes);
 }
@@ -146,18 +106,19 @@
     update_used();
     update_capacity();
     update_max_capacity();
-    _current_size->set_value(MetaspaceAux::reserved_in_bytes());
   }
 }
 
 void MetaspaceCounters::initialize_performance_counters() {
   if (UsePerfData) {
+    assert(_metaspace_counters == NULL, "Should only be initialized once");
     _metaspace_counters = new MetaspaceCounters();
   }
 }
 
 void MetaspaceCounters::update_performance_counters() {
   if (UsePerfData) {
+    assert(_metaspace_counters != NULL, "Should be initialized");
     _metaspace_counters->update_all();
   }
 }
--- a/hotspot/src/share/vm/memory/metaspaceCounters.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/memory/metaspaceCounters.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -29,11 +29,9 @@
 
 class MetaspaceCounters: public CHeapObj<mtClass> {
   friend class VMStructs;
-  PerfVariable*      _current_size;
   PerfVariable*      _capacity;
   PerfVariable*      _used;
   PerfVariable*      _max_capacity;
-  char*              _name_space;
   static MetaspaceCounters* _metaspace_counters;
   void initialize(size_t min_capacity,
                   size_t max_capacity,
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -2170,7 +2170,11 @@
       if (impl != NULL) {
         if (!impl->is_loader_alive(is_alive)) {
           // remove this guy
-          *adr_implementor() = NULL;
+          Klass** klass = adr_implementor();
+          assert(klass != NULL, "null klass");
+          if (klass != NULL) {
+            *klass = NULL;
+          }
         }
       }
     }
@@ -3151,9 +3155,10 @@
   if (protection_domain() != NULL) {
     guarantee(protection_domain()->is_oop(), "should be oop");
   }
-  if (host_klass() != NULL) {
-    guarantee(host_klass()->is_metadata(), "should be in metaspace");
-    guarantee(host_klass()->is_klass(), "should be klass");
+  const Klass* host = host_klass();
+  if (host != NULL) {
+    guarantee(host->is_metadata(), "should be in metaspace");
+    guarantee(host->is_klass(), "should be klass");
   }
   if (signers() != NULL) {
     guarantee(signers()->is_objArray(), "should be obj array");
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -536,7 +536,9 @@
     assert(is_anonymous(), "not anonymous");
     Klass** addr = (Klass**)adr_host_klass();
     assert(addr != NULL, "no reversed space");
-    *addr = host;
+    if (addr != NULL) {
+      *addr = host;
+    }
   }
   bool is_anonymous() const                {
     return (_misc_flags & _misc_is_anonymous) != 0;
@@ -758,7 +760,10 @@
   void set_implementor(Klass* k) {
     assert(is_interface(), "not interface");
     Klass** addr = adr_implementor();
-    *addr = k;
+    assert(addr != NULL, "null addr");
+    if (addr != NULL) {
+      *addr = k;
+    }
   }
 
   int  nof_implementors() const       {
--- a/hotspot/src/share/vm/oops/symbol.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/oops/symbol.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 #include "classfile/altHashing.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "oops/symbol.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
@@ -210,6 +211,28 @@
   return AltHashing::murmur3_32(seed, (const jbyte*)as_C_string(), utf8_length());
 }
 
+void Symbol::increment_refcount() {
+  // Only increment the refcount if positive.  If negative either
+  // overflow has occurred or it is a permanent symbol in a read only
+  // shared archive.
+  if (_refcount >= 0) {
+    Atomic::inc(&_refcount);
+    NOT_PRODUCT(Atomic::inc(&_total_count);)
+  }
+}
+
+void Symbol::decrement_refcount() {
+  if (_refcount >= 0) {
+    Atomic::dec(&_refcount);
+#ifdef ASSERT
+    if (_refcount < 0) {
+      print();
+      assert(false, "reference count underflow for symbol");
+    }
+#endif
+  }
+}
+
 void Symbol::print_on(outputStream* st) const {
   if (this == NULL) {
     st->print_cr("NULL");
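
The guard moved into symbol.cpp above treats a negative _refcount as a sentinel: either the count overflowed or the symbol is permanent and lives in a read-only shared archive, so the count must never be written. The following is a minimal standalone sketch of that sentinel pattern, not HotSpot code; the type and function names are illustrative only, and, as in the code above, the sign check and the update are two separate steps.

// Standalone sketch (not the HotSpot Symbol class) of a reference count that
// uses a negative value as a "permanent, never freed" sentinel, mirroring the
// guard in Symbol::increment_refcount/decrement_refcount above.
#include <atomic>
#include <cstdio>

struct CountedThing {
  std::atomic<int> refcount;                 // -1 marks a permanent object

  explicit CountedThing(bool permanent) : refcount(permanent ? -1 : 1) {}

  void acquire() {
    // Only adjust non-negative counts; permanent objects are never counted.
    if (refcount.load() >= 0) refcount.fetch_add(1);
  }

  void release() {
    if (refcount.load() >= 0 && refcount.fetch_sub(1) == 1) {
      std::puts("last reference dropped");   // a real owner would free here
    }
  }
};

int main() {
  CountedThing permanent(true), ordinary(false);
  permanent.acquire();                       // no effect: count stays at -1
  ordinary.acquire();                        // 1 -> 2
  ordinary.release();                        // 2 -> 1
  ordinary.release();                        // 1 -> 0, prints the message
  return 0;
}
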
--- a/hotspot/src/share/vm/oops/symbol.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/oops/symbol.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
 
 #include "utilities/utf8.hpp"
 #include "memory/allocation.hpp"
-#include "runtime/atomic.hpp"
 
 // A Symbol is a canonicalized string.
 // All Symbols reside in global SymbolTable and are reference counted.
@@ -150,8 +149,8 @@
 
   // Reference counting.  See comments above this class for when to use.
   int refcount() const      { return _refcount; }
-  inline void increment_refcount();
-  inline void decrement_refcount();
+  void increment_refcount();
+  void decrement_refcount();
 
   int byte_at(int index) const {
     assert(index >=0 && index < _length, "symbol index overflow");
@@ -232,26 +231,4 @@
  return (((uintptr_t)this < (uintptr_t)other) ? -1
    : ((uintptr_t)this == (uintptr_t) other) ? 0 : 1);
 }
-
-inline void Symbol::increment_refcount() {
-  // Only increment the refcount if positive.  If negative either
-  // overflow has occurred or it is a permanent symbol in a read only
-  // shared archive.
-  if (_refcount >= 0) {
-    Atomic::inc(&_refcount);
-    NOT_PRODUCT(Atomic::inc(&_total_count);)
-  }
-}
-
-inline void Symbol::decrement_refcount() {
-  if (_refcount >= 0) {
-    Atomic::dec(&_refcount);
-#ifdef ASSERT
-    if (_refcount < 0) {
-      print();
-      assert(false, "reference count underflow for symbol");
-    }
-#endif
-  }
-}
 #endif // SHARE_VM_OOPS_SYMBOL_HPP
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -54,6 +54,12 @@
 
 #define C2_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct) \
                                                                             \
+  develop(bool, StressLCM, false,                                           \
+          "Randomize instruction scheduling in LCM")                        \
+                                                                            \
+  develop(bool, StressGCM, false,                                           \
+          "Randomize instruction scheduling in GCM")                        \
+                                                                            \
   notproduct(intx, CompileZapFirst, 0,                                      \
           "If +ZapDeadCompiledLocals, "                                     \
           "skip this many before compiling in zap calls")                   \
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -2899,6 +2899,13 @@
       }
     }
     break;
+  case Op_MemBarStoreStore:
+    // Break the link with AllocateNode: it is no longer useful and
+    // confuses register allocation.
+    if (n->req() > MemBarNode::Precedent) {
+      n->set_req(MemBarNode::Precedent, top());
+    }
+    break;
   default:
     assert( !n->is_Call(), "" );
     assert( !n->is_Mem(), "" );
@@ -3669,3 +3676,38 @@
     n->set_req(0, NULL);
   }
 }
+
+// Auxiliary method to support randomized stressing/fuzzing.
+//
+// This method can be called an arbitrary number of times, with the current
+// count as the argument. The logic allows selecting a single candidate from
+// the running list of candidates as follows:
+//    int count = 0;
+//    Cand* selected = NULL;
+//    while ((cand = cand->next()) != NULL) {
+//      if (randomized_select(++count)) {
+//        selected = cand;
+//      }
+//    }
+//
+// Including the count equalizes the chances that any candidate is "selected".
+// This is useful when we don't have the complete list of candidates to choose
+// from uniformly. In this case, we need to adjust the randomness of the
+// selection, or else we will end up biasing the selection towards the later
+// candidates.
+//
+// A quick back-of-the-envelope calculation shows that for a list of n
+// candidates, equal probability for a candidate to persist as "best" can be
+// achieved by replacing it with the k-th "next" candidate with probability
+// 1/k. It is easy to show that by the end of the run the probability for any
+// candidate converges to 1/n, giving a uniform distribution among all the
+// candidates.
+//
+// We don't care about the domain size as long as (RANDOMIZED_DOMAIN / count) is large.
+#define RANDOMIZED_DOMAIN_POW 29
+#define RANDOMIZED_DOMAIN (1 << RANDOMIZED_DOMAIN_POW)
+#define RANDOMIZED_DOMAIN_MASK ((1 << (RANDOMIZED_DOMAIN_POW + 1)) - 1)
+bool Compile::randomized_select(int count) {
+  assert(count > 0, "only positive");
+  return (os::random() & RANDOMIZED_DOMAIN_MASK) < (RANDOMIZED_DOMAIN / count);
+}
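
The comment above describes a reservoir-style streaming choice: the k-th candidate replaces the current selection with probability 1/k, so candidate i survives with probability (1/i) * (i/(i+1)) * ... * ((n-1)/n) = 1/n. The following standalone sketch, which is not the HotSpot implementation and uses a plain modulo test in place of the RANDOMIZED_DOMAIN mask, demonstrates that the selection comes out uniform.

// Standalone demonstration (not HotSpot code) of the uniform
// "replace the current best with the k-th candidate with probability 1/k"
// selection described in the comment above.
#include <cstdio>
#include <cstdlib>
#include <vector>

static bool randomized_select(int count) {
  return (std::rand() % count) == 0;       // accept with probability 1/count
}

int main() {
  const int n = 5, trials = 100000;
  std::vector<int> wins(n, 0);
  for (int t = 0; t < trials; ++t) {
    int selected = -1, count = 0;
    for (int cand = 0; cand < n; ++cand) { // candidates arrive one at a time
      if (randomized_select(++count)) {
        selected = cand;
      }
    }
    ++wins[selected];
  }
  for (int i = 0; i < n; ++i) {
    std::printf("candidate %d selected %d times\n", i, wins[i]); // ~ trials/n each
  }
  return 0;
}

Each of the n candidates ends up selected in roughly trials/n of the runs, which is the property the StressLCM and StressGCM paths rely on when they randomize scheduling decisions.
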
--- a/hotspot/src/share/vm/opto/compile.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/opto/compile.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -678,6 +678,7 @@
   void         record_dead_node(uint idx)  { if (_dead_node_list.test_set(idx)) return;
                                              _dead_node_count++;
                                            }
+  bool         is_dead_node(uint idx)      { return _dead_node_list.test(idx) != 0; }
   uint         dead_node_count()           { return _dead_node_count; }
   void         reset_dead_node_list()      { _dead_node_list.Reset();
                                              _dead_node_count = 0;
@@ -1086,6 +1087,9 @@
 
   // Definitions of pd methods
   static void pd_compiler2_init();
+
+  // Auxiliary method for randomized fuzzing/stressing
+  static bool randomized_select(int count);
 };
 
 #endif // SHARE_VM_OPTO_COMPILE_HPP
--- a/hotspot/src/share/vm/opto/gcm.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/opto/gcm.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1046,6 +1046,8 @@
   }
 #endif
 
+  int cand_cnt = 0;  // number of candidates tried
+
   // Walk up the dominator tree from LCA (Lowest common ancestor) to
   // the earliest legal location.  Capture the least execution frequency.
   while (LCA != early) {
@@ -1071,8 +1073,11 @@
         LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
     }
 #endif
+    cand_cnt++;
     if (LCA_freq < least_freq              || // Better Frequency
-        ( !in_latency                   &&    // No block containing latency
+        (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
+         (!StressGCM                    &&    // Otherwise, choose with latency
+          !in_latency                   &&    // No block containing latency
           LCA_freq < least_freq * delta &&    // No worse frequency
           target >= end_lat             &&    // within latency range
           !self->is_iteratively_computed() )  // But don't hoist IV increments
@@ -1210,7 +1215,8 @@
     }
 
     // If there is no opportunity to hoist, then we're done.
-    bool try_to_hoist = (LCA != early);
+    // In stress mode, try to hoist even single operations.
+    bool try_to_hoist = StressGCM || (LCA != early);
 
     // Must clone guys stay next to use; no hoisting allowed.
     // Also cannot hoist guys that alter memory or are otherwise not
--- a/hotspot/src/share/vm/opto/lcm.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -421,6 +421,7 @@
   uint latency = 0; // Bigger is scheduled first
   uint score   = 0; // Bigger is better
   int idx = -1;     // Index in worklist
+  int cand_cnt = 0; // Candidate count
 
   for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
     // Order in worklist is used to break ties.
@@ -503,11 +504,14 @@
     uint n_score   = n->req();   // Many inputs get high score to break ties
 
     // Keep best latency found
-    if( choice < n_choice ||
-        ( choice == n_choice &&
-          ( latency < n_latency ||
-            ( latency == n_latency &&
-              ( score < n_score ))))) {
+    cand_cnt++;
+    if (choice < n_choice ||
+        (choice == n_choice &&
+         ((StressLCM && Compile::randomized_select(cand_cnt)) ||
+          (!StressLCM &&
+           (latency < n_latency ||
+            (latency == n_latency &&
+             (score < n_score))))))) {
       choice  = n_choice;
       latency = n_latency;
       score   = n_score;
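
The lcm.cpp change above leaves the primary criterion (choice) alone and only randomizes the latency/score tie-break when StressLCM is set, counting candidates so that randomized_select() stays uniform. Below is a compact sketch of that pattern; it is not HotSpot code, the Cand type and pick() are illustrative, and the acceptance test is simplified to a modulo.

// Sketch (not HotSpot code) of the stress-mode tie-break pattern used above:
// the primary key always decides; a stress flag swaps the secondary ordering
// for a randomized pick so that different legal orders get exercised.
#include <cstdio>
#include <cstdlib>

struct Cand { int choice; int latency; int score; };

static bool randomized_select(int count) {
  return (std::rand() % count) == 0;   // accept with probability ~1/count
}

static int pick(const Cand* c, int n, bool stress) {
  int best = 0, cand_cnt = 0;
  for (int i = 1; i < n; ++i) {
    ++cand_cnt;
    bool take =
        c[i].choice > c[best].choice ||                  // primary key wins outright
        (c[i].choice == c[best].choice &&
         ((stress && randomized_select(cand_cnt)) ||     // stress: random tie-break
          (!stress &&
           (c[i].latency > c[best].latency ||            // normal: latency, then score
            (c[i].latency == c[best].latency &&
             c[i].score > c[best].score)))));
    if (take) best = i;
  }
  return best;
}

int main() {
  Cand worklist[] = { {1, 5, 2}, {1, 5, 7}, {2, 1, 1}, {1, 9, 0} };
  std::printf("normal pick: %d\n", pick(worklist, 4, false)); // index 2: best choice
  std::printf("stress pick: %d\n", pick(worklist, 4, true));  // index 2 still wins
  return 0;
}
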
--- a/hotspot/src/share/vm/opto/macro.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/opto/macro.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1101,12 +1101,6 @@
   Node* klass_node        = alloc->in(AllocateNode::KlassNode);
   Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);
 
-  Node* storestore = alloc->storestore();
-  if (storestore != NULL) {
-    // Break this link that is no longer useful and confuses register allocation
-    storestore->set_req(MemBarNode::Precedent, top());
-  }
-
   assert(ctrl != NULL, "must have control");
   // We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
   // they will not be used if "always_slow" is set
@@ -1324,7 +1318,7 @@
         // No InitializeNode or no stores captured by zeroing
         // elimination. Simply add the MemBarStoreStore after object
         // initialization.
-        MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot, fast_oop_rawmem);
+        MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
         transform_later(mb);
 
         mb->init_req(TypeFunc::Memory, fast_oop_rawmem);
--- a/hotspot/src/share/vm/opto/memnode.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -238,7 +238,7 @@
     return this;
   ctl = in(MemNode::Control);
   // Don't bother trying to transform a dead node
-  if( ctl && ctl->is_top() )  return NodeSentinel;
+  if (ctl && ctl->is_top())  return NodeSentinel;
 
   PhaseIterGVN *igvn = phase->is_IterGVN();
   // Wait if control on the worklist.
@@ -262,8 +262,8 @@
   }
   // Ignore if memory is dead, or self-loop
   Node *mem = in(MemNode::Memory);
-  if( phase->type( mem ) == Type::TOP ) return NodeSentinel; // caller will return NULL
-  assert( mem != this, "dead loop in MemNode::Ideal" );
+  if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return NULL
+  assert(mem != this, "dead loop in MemNode::Ideal");
 
   if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) {
     // This memory slice may be dead.
@@ -273,12 +273,12 @@
   }
 
   Node *address = in(MemNode::Address);
-  const Type *t_adr = phase->type( address );
-  if( t_adr == Type::TOP )              return NodeSentinel; // caller will return NULL
-
-  if( can_reshape && igvn != NULL &&
+  const Type *t_adr = phase->type(address);
+  if (t_adr == Type::TOP)              return NodeSentinel; // caller will return NULL
+
+  if (can_reshape && igvn != NULL &&
       (igvn->_worklist.member(address) ||
-       igvn->_worklist.size() > 0 && (phase->type(address) != adr_type())) ) {
+       igvn->_worklist.size() > 0 && (t_adr != adr_type())) ) {
     // The address's base and type may change when the address is processed.
     // Delay this mem node transformation until the address is processed.
     phase->is_IterGVN()->_worklist.push(this);
@@ -288,7 +288,7 @@
   // Do NOT remove or optimize the next lines: ensure a new alias index
   // is allocated for an oop pointer type before Escape Analysis.
   // Note: C++ will not remove it since the call has side effect.
-  if ( t_adr->isa_oopptr() ) {
+  if (t_adr->isa_oopptr()) {
     int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
   }
 
@@ -296,6 +296,26 @@
   Node* base = NULL;
   if (address->is_AddP())
     base = address->in(AddPNode::Base);
+  if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) &&
+      !t_adr->isa_rawptr()) {
+    // Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true.
+    Compile* C = phase->C;
+    tty->cr();
+    tty->print_cr("===== NULL+offs not RAW address =====");
+    if (C->is_dead_node(this->_idx))    tty->print_cr("'this' is dead");
+    if ((ctl != NULL) && C->is_dead_node(ctl->_idx)) tty->print_cr("'ctl' is dead");
+    if (C->is_dead_node(mem->_idx))     tty->print_cr("'mem' is dead");
+    if (C->is_dead_node(address->_idx)) tty->print_cr("'address' is dead");
+    if (C->is_dead_node(base->_idx))    tty->print_cr("'base' is dead");
+    tty->cr();
+    base->dump(1);
+    tty->cr();
+    this->dump(2);
+    tty->print("this->adr_type():     "); adr_type()->dump(); tty->cr();
+    tty->print("phase->type(address): "); t_adr->dump(); tty->cr();
+    tty->print("phase->type(base):    "); phase->type(address)->dump(); tty->cr();
+    tty->cr();
+  }
   assert(base == NULL || t_adr->isa_rawptr() ||
         !phase->type(base)->higher_equal(TypePtr::NULL_PTR), "NULL+offs not RAW address?");
 #endif
--- a/hotspot/src/share/vm/prims/jvmtiEnter.xsl	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiEnter.xsl	Fri Mar 08 08:10:00 2013 -0800
@@ -773,7 +773,7 @@
 </xsl:text>
     <xsl:apply-templates select=".." mode="traceError">     
       <xsl:with-param name="err">JVMTI_ERROR_INVALID_THREAD</xsl:with-param>
-      <xsl:with-param name="comment"> - jthread resolved to NULL - jthread = %0x%x</xsl:with-param>
+      <xsl:with-param name="comment"> - jthread resolved to NULL - jthread = 0x%x</xsl:with-param>
       <xsl:with-param name="extraValue">, <xsl:value-of select="$name"/></xsl:with-param>
     </xsl:apply-templates>
     <xsl:text>
@@ -782,7 +782,7 @@
 </xsl:text>
     <xsl:apply-templates select=".." mode="traceError">     
       <xsl:with-param name="err">JVMTI_ERROR_INVALID_THREAD</xsl:with-param>
-      <xsl:with-param name="comment"> - oop is not a thread - jthread = %0x%x</xsl:with-param>
+      <xsl:with-param name="comment"> - oop is not a thread - jthread = 0x%x</xsl:with-param>
       <xsl:with-param name="extraValue">, <xsl:value-of select="$name"/></xsl:with-param>
     </xsl:apply-templates>
     <xsl:text>
@@ -794,7 +794,7 @@
       <xsl:with-param name="err">
         <xsl:text>JVMTI_ERROR_THREAD_NOT_ALIVE</xsl:text>
       </xsl:with-param>
-      <xsl:with-param name="comment"> - not a Java thread - jthread = %0x%x</xsl:with-param>
+      <xsl:with-param name="comment"> - not a Java thread - jthread = 0x%x</xsl:with-param>
       <xsl:with-param name="extraValue">, <xsl:value-of select="$name"/></xsl:with-param>
     </xsl:apply-templates>
     <xsl:text>
@@ -838,7 +838,7 @@
 </xsl:text>
     <xsl:apply-templates select=".." mode="traceError">     
       <xsl:with-param name="err">JVMTI_ERROR_ILLEGAL_ARGUMENT</xsl:with-param>
-      <xsl:with-param name="comment"> - negative depth - jthread = %0x%x</xsl:with-param>
+      <xsl:with-param name="comment"> - negative depth - jthread = 0x%x</xsl:with-param>
       <xsl:with-param name="extraValue">, <xsl:value-of select="$name"/></xsl:with-param>
     </xsl:apply-templates>
     <xsl:text>
--- a/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -997,13 +997,19 @@
       // move our object at this point. However, our owner value is safe
       // since it is either the Lock word on a stack or a JavaThread *.
       owning_thread = Threads::owning_thread_from_monitor_owner(owner, !at_safepoint);
-      assert(owning_thread != NULL, "sanity check");
-      if (owning_thread != NULL) {  // robustness
+      // Cannot assume (owning_thread != NULL) here because this function
+      // may not have been called at a safepoint and the owning_thread
+      // might not be suspended.
+      if (owning_thread != NULL) {
         // The monitor's owner either has to be the current thread, at safepoint
         // or it has to be suspended. Any of these conditions will prevent both
         // contending and waiting threads from modifying the state of
         // the monitor.
         if (!at_safepoint && !JvmtiEnv::is_thread_fully_suspended(owning_thread, true, &debug_bits)) {
+          // Don't worry! This return of JVMTI_ERROR_THREAD_NOT_SUSPENDED
+          // will not make it back to the JVM/TI agent. The error code will
+          // get intercepted in JvmtiEnv::GetObjectMonitorUsage() which
+          // will retry the call via a VM_GetObjectMonitorUsage VM op.
           return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
         }
         HandleMark hm;
--- a/hotspot/src/share/vm/runtime/frame.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1070,7 +1070,12 @@
 
   // First consult the ADLC on where it puts parameter 0 for this signature.
   VMReg reg = SharedRuntime::name_for_receiver();
-  oop r = *caller.oopmapreg_to_location(reg, reg_map);
+  oop* oop_adr = caller.oopmapreg_to_location(reg, reg_map);
+  if (oop_adr == NULL) {
+    guarantee(oop_adr != NULL, "bad register save location");
+    return NULL;
+  }
+  oop r = *oop_adr;
   assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (intptr_t) r, (intptr_t) r));
   return r;
 }
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1751,6 +1751,10 @@
   manageable(intx, CMSWaitDuration, 2000,                                   \
           "Time in milliseconds that CMS thread waits for young GC")        \
                                                                             \
+  develop(uintx, CMSCheckInterval, 1000,                                    \
+          "Interval in milliseconds that CMS thread checks if it "          \
+          "should start a collection cycle")                                \
+                                                                            \
   product(bool, CMSYield, true,                                             \
           "Yield between steps of concurrent mark & sweep")                 \
                                                                             \
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -813,6 +813,7 @@
   }
 
   if (owner != NULL) {
+    // owning_thread_from_monitor_owner() may also return NULL here
     return Threads::owning_thread_from_monitor_owner(owner, doLock);
   }
 
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -4285,7 +4285,9 @@
       if (owner == (address)p) return p;
     }
   }
-  assert(UseHeavyMonitors == false, "Did not find owning Java thread with UseHeavyMonitors enabled");
+  // Cannot assert on lack of success here since this function may be
+  // used by code that is trying to report useful problem information
+  // like deadlock detection.
   if (UseHeavyMonitors) return NULL;
 
   //
@@ -4303,7 +4305,7 @@
       }
     }
   }
-  assert(the_owner != NULL, "Did not find owning Java thread for lock word address");
+  // Cannot assert on lack of success here; see the comment above.
   return the_owner;
 }
 
--- a/hotspot/src/share/vm/services/memReporter.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/services/memReporter.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -419,7 +419,7 @@
       _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
       _output->print("%28s", " ");
     } else {
-      _output->print("[" PTR_FORMAT "]%18s", " ");
+      _output->print("[" PTR_FORMAT "]%18s", pc, " ");
     }
 
     _output->print_cr("(mmap: reserved=%d%s, committed=%d%s)",
@@ -596,7 +596,7 @@
         _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
         _output->print("%28s", " ");
       } else {
-        _output->print("[" PTR_FORMAT "]%18s", " ");
+        _output->print("[" PTR_FORMAT "]%18s", pc, " ");
       }
     }
 
--- a/hotspot/src/share/vm/services/threadService.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/services/threadService.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -327,8 +327,28 @@
     while (waitingToLockMonitor != NULL || waitingToLockBlocker != NULL) {
       cycle->add_thread(currentThread);
       if (waitingToLockMonitor != NULL) {
-        currentThread = Threads::owning_thread_from_monitor_owner((address)waitingToLockMonitor->owner(),
-                                                                  false /* no locking needed */);
+        currentThread = Threads::owning_thread_from_monitor_owner(
+                          (address)waitingToLockMonitor->owner(),
+                          false /* no locking needed */);
+        if (currentThread == NULL) {
+          // This function is called at a safepoint so the JavaThread
+          // that owns waitingToLockMonitor should be findable, but
+          // if it is not findable, then the previous currentThread is
+          // blocked permanently. We record this as a deadlock.
+          num_deadlocks++;
+
+          cycle->set_deadlock(true);
+
+          // add this cycle to the deadlocks list
+          if (deadlocks == NULL) {
+            deadlocks = cycle;
+          } else {
+            last->set_next(cycle);
+          }
+          last = cycle;
+          cycle = new DeadlockCycle();
+          break;
+        }
       } else {
         if (concurrent_locks) {
           if (waitingToLockBlocker->is_a(SystemDictionary::abstract_ownable_synchronizer_klass())) {
@@ -841,7 +861,17 @@
         owner_desc = " (JVMTI raw monitor),\n  which is held by";
       }
       currentThread = Threads::owning_thread_from_monitor_owner(
-        (address)waitingToLockMonitor->owner(), false /* no locking needed */);
+                        (address)waitingToLockMonitor->owner(),
+                        false /* no locking needed */);
+      if (currentThread == NULL) {
+        // The deadlock was detected at a safepoint so the JavaThread
+        // that owns waitingToLockMonitor should be findable, but
+        // if it is not findable, then the previous currentThread is
+        // blocked permanently.
+        st->print("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
+                  (address)waitingToLockMonitor->owner());
+        continue;
+      }
     } else {
       st->print("  waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
                 (address)waitingToLockBlocker,
--- a/hotspot/src/share/vm/utilities/numberSeq.cpp	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/src/share/vm/utilities/numberSeq.cpp	Fri Mar 08 08:10:00 2013 -0800
@@ -245,7 +245,7 @@
 
 void NumberSeq::dump_on(outputStream* s) {
   AbsSeq::dump_on(s);
-  s->print_cr("\t\t _last = %7.3f, _maximum = %7.3f");
+  s->print_cr("\t\t _last = %7.3f, _maximum = %7.3f", _last, _maximum);
 }
 
 void TruncatedSeq::dump_on(outputStream* s) {
--- a/hotspot/test/compiler/6431242/Test.java	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/test/compiler/6431242/Test.java	Fri Mar 08 08:10:00 2013 -0800
@@ -25,7 +25,7 @@
 /*
  * @test
  * @bug 6431242
- * @run main/othervm -server -XX:+PrintCompilation Test
+ * @run main Test
  */
 
 public class Test{
--- a/hotspot/test/compiler/6589834/Test_ia32.java	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/test/compiler/6589834/Test_ia32.java	Fri Mar 08 08:10:00 2013 -0800
@@ -26,7 +26,7 @@
  * @bug 6589834
  * @summary deoptimization problem with -XX:+DeoptimizeALot
  *
- * @run main/othervm -server Test_ia32
+ * @run main Test_ia32
  */
 
 /***************************************************************************************
--- a/hotspot/test/compiler/6636138/Test1.java	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/test/compiler/6636138/Test1.java	Fri Mar 08 08:10:00 2013 -0800
@@ -26,7 +26,7 @@
  * @bug 6636138
  * @summary SuperWord::co_locate_pack(Node_List* p) generates memory graph that leads to memory order violation.
  *
- * @run main/othervm -server -Xbatch -XX:CompileOnly=Test1.init Test1
+ * @run main/othervm -Xbatch -XX:CompileOnly=Test1.init Test1
  */
 
 public class Test1 {
--- a/hotspot/test/compiler/6636138/Test2.java	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/test/compiler/6636138/Test2.java	Fri Mar 08 08:10:00 2013 -0800
@@ -26,7 +26,7 @@
  * @bug 6636138
  * @summary SuperWord::co_locate_pack(Node_List* p) generates memory graph that leads to memory order violation.
  *
- * @run main/othervm -server -Xbatch -XX:CompileOnly=Test2.shift Test2
+ * @run main/othervm -Xbatch -XX:CompileOnly=Test2.shift Test2
  */
 
 public class Test2 {
--- a/hotspot/test/compiler/6795161/Test.java	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/test/compiler/6795161/Test.java	Fri Mar 08 08:10:00 2013 -0800
@@ -26,7 +26,7 @@
  * @test
  * @bug 6795161
  * @summary Escape analysis leads to data corruption
- * @run main/othervm -server  -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:CompileOnly=Test -XX:+DoEscapeAnalysis Test
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:CompileOnly=Test -XX:+DoEscapeAnalysis Test
  */
 
 class Test_Class_1 {
--- a/hotspot/test/compiler/6946040/TestCharShortByteSwap.java	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/test/compiler/6946040/TestCharShortByteSwap.java	Fri Mar 08 08:10:00 2013 -0800
@@ -26,7 +26,7 @@
  * @test
  * @bug 6946040
  * @summary Tests Character/Short.reverseBytes and their intrinsics implementation in the server compiler
- * @run main/othervm -Xbatch -server -XX:CompileOnly=.testChar,.testShort TestCharShortByteSwap
+ * @run main/othervm -Xbatch -XX:CompileOnly=.testChar,.testShort TestCharShortByteSwap
  */
 
 // This test must run without any command line arguments.
--- a/hotspot/test/compiler/7068051/Test7068051.sh	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/test/compiler/7068051/Test7068051.sh	Fri Mar 08 08:10:00 2013 -0800
@@ -45,5 +45,5 @@
 
 ${TESTJAVA}/bin/javac -d . Test7068051.java
 
-${TESTJAVA}/bin/java -showversion -Xbatch ${TESTVMOPTS} Test7068051 foo.jar
+${TESTJAVA}/bin/java ${TESTVMOPTS} -showversion -Xbatch Test7068051 foo.jar
 
--- a/hotspot/test/compiler/8000805/Test8000805.java	Thu Mar 07 11:17:36 2013 -0800
+++ b/hotspot/test/compiler/8000805/Test8000805.java	Fri Mar 08 08:10:00 2013 -0800
@@ -26,7 +26,7 @@
  * @bug 8000805
  * @summary JMM issue: short loads are non-atomic
  *
- * @run main/othervm -server -XX:-TieredCompilation -Xcomp -XX:+PrintCompilation -XX:CompileOnly=Test8000805.loadS2LmaskFF,Test8000805.loadS2Lmask16,Test8000805.loadS2Lmask13,Test8000805.loadUS_signExt,Test8000805.loadB2L_mask8 Test8000805
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -Xcomp -XX:+PrintCompilation -XX:CompileOnly=Test8000805.loadS2LmaskFF,Test8000805.loadS2Lmask16,Test8000805.loadS2Lmask13,Test8000805.loadUS_signExt,Test8000805.loadB2L_mask8 Test8000805
  */
 
 public class Test8000805 {