Merge
author amurillo
date Fri, 23 Dec 2011 15:24:43 -0800
changeset 11260 1020a9a071b7
parent 11245 4463cecfeda8 (current diff)
parent 11259 f3c6f6231de5 (diff)
child 11261 a8d0b94dcf7c
Merge
--- a/hotspot/make/hotspot_version	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/make/hotspot_version	Fri Dec 23 15:24:43 2011 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=23
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=08
+HS_BUILD_NUMBER=09
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -5968,7 +5968,9 @@
   assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
   LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
 #ifdef ASSERT
-  LP64_ONLY(if (UseCompressedOops) verify_heapbase("call_VM_base");)
+  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
+  // r12 is the heapbase.
+  LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base");)
 #endif // ASSERT
 
   assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
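
For context, verify_heapbase() (defined elsewhere in this same file) emits a runtime check that r12 still holds the narrow-oop heap base. A simplified sketch of its shape, reconstructed from memory with the scratch-register push/pop elided, so details may differ:

    void MacroAssembler::verify_heapbase(const char* msg) {
      assert(UseCompressedOops, "should be compressed");
      if (CheckCompressedOops) {
        Label ok;
        // Compare r12 against the recorded narrow-oop base; stop the VM on mismatch.
        cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
        jcc(Assembler::equal, ok);
        stop(msg);
        bind(ok);
      }
    }

With TraceBytecodes enabled, r12 is saved over the call rather than kept as the heap base, so the invariant legitimately does not hold at this point and the check is skipped.
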
--- a/hotspot/src/os/bsd/vm/jvm_bsd.h	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/os/bsd/vm/jvm_bsd.h	Fri Dec 23 15:24:43 2011 -0800
@@ -33,7 +33,6 @@
 // All local includes have been commented out.
 */
 
-
 #ifndef JVM_MD_H
 #define JVM_MD_H
 
@@ -59,6 +58,7 @@
 
 #include <dirent.h>             /* For DIR */
 #include <sys/param.h>          /* For MAXPATHLEN */
+#include <sys/socket.h>         /* For socklen_t */
 #include <unistd.h>             /* For F_OK, R_OK, W_OK */
 
 #define JNI_ONLOAD_SYMBOLS      {"JNI_OnLoad"}
@@ -128,8 +128,4 @@
 #endif
 #endif /* JVM_MD_H */
 
-// Reconciliation History
-// jvm_solaris.h        1.6 99/06/22 16:38:47
-// End
-
 #endif // OS_BSD_VM_JVM_BSD_H
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -150,7 +150,6 @@
 
 // for timer info max values which include all bits
 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
-#define SEC_IN_NANOSECS  1000000000LL
 
 #define LARGEPAGES_BIT (1 << 6)
 ////////////////////////////////////////////////////////////////////////////////
@@ -3445,8 +3444,6 @@
 // generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
 // SIGSEGV, see 4355769.
 
-const int NANOSECS_PER_MILLISECS = 1000000;
-
 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
   assert(thread == Thread::current(),  "thread consistency check");
 
@@ -3469,7 +3466,7 @@
         // not a guarantee() because JVM should not abort on kernel/glibc bugs
         assert(!Bsd::supports_monotonic_clock(), "time moving backwards");
       } else {
-        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISECS;
+        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
       }
 
       if(millis <= 0) {
@@ -3508,7 +3505,7 @@
         // not a guarantee() because JVM should not abort on kernel/glibc bugs
         assert(!Bsd::supports_monotonic_clock(), "time moving backwards");
       } else {
-        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISECS;
+        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
       }
 
       if(millis <= 0) break ;
@@ -4197,7 +4194,7 @@
   int rc = os::Bsd::clock_gettime(clockid, &tp);
   assert(rc == 0, "clock_gettime is expected to return 0 code");
 
-  return (tp.tv_sec * SEC_IN_NANOSECS) + tp.tv_nsec;
+  return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
 }
 #endif
 
@@ -5522,9 +5519,6 @@
  * is no need to track notifications.
  */
 
-
-#define NANOSECS_PER_SEC 1000000000
-#define NANOSECS_PER_MILLISEC 1000000
 #define MAX_SECS 100000000
 /*
  * This code is common to bsd and solaris and will be moved to a
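
Both hunks above switch from file-local #defines to shared time-unit constants. Those presumably live centrally (in share/vm/utilities/globalDefinitions.hpp, if memory serves) as:

    // Shared time-unit constants replacing the per-OS duplicates:
    const jlong NANOSECS_PER_SEC      = CONST64(1000000000);
    const jint  NANOSECS_PER_MILLISEC = 1000000;
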
--- a/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -198,15 +198,15 @@
   return ::socket(domain, type, protocol);
 }
 
-inline int os::recv(int fd, char *buf, int nBytes, int flags) {
-  RESTARTABLE_RETURN_INT(::recv(fd, buf, nBytes, (unsigned int) flags));
+inline int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
+  RESTARTABLE_RETURN_INT(::recv(fd, buf, nBytes, flags));
 }
 
-inline int os::send(int fd, char *buf, int nBytes, int flags) {
-  RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, (unsigned int) flags));
+inline int os::send(int fd, char* buf, size_t nBytes, uint flags) {
+  RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
 }
 
-inline int os::raw_send(int fd, char *buf, int nBytes, int flags) {
+inline int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
   return os::send(fd, buf, nBytes, flags);
 }
 
@@ -246,57 +246,52 @@
   return ::listen(fd, count);
 }
 
-inline int os::connect(int fd, struct sockaddr *him, int len) {
+inline int os::connect(int fd, struct sockaddr* him, socklen_t len) {
   RESTARTABLE_RETURN_INT(::connect(fd, him, len));
 }
 
-inline int os::accept(int fd, struct sockaddr *him, int *len) {
-  // This cast is from int to unsigned int on bsd.  Since we
-  // only pass the parameter "len" around the vm and don't try to
-  // fetch its value, this cast is safe for now. The java.net group
-  // may need and want to change this interface someday if socklen_t goes
-  // to 64 bits on some platform that we support.
-
+inline int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
   // At least OpenBSD and FreeBSD can return EINTR from accept.
-  RESTARTABLE_RETURN_INT(::accept(fd, him, (socklen_t *)len));
+  RESTARTABLE_RETURN_INT(::accept(fd, him, len));
 }
 
-inline int os::recvfrom(int fd, char *buf, int nBytes, int flags,
-                         sockaddr *from, int *fromlen) {
-  RESTARTABLE_RETURN_INT(::recvfrom(fd, buf, nBytes, (unsigned int) flags, from, (socklen_t *)fromlen));
+inline int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
+                         sockaddr* from, socklen_t* fromlen) {
+  RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
 }
 
-inline int os::sendto(int fd, char *buf, int len, int flags,
-                        struct sockaddr *to, int tolen) {
-  RESTARTABLE_RETURN_INT(::sendto(fd, buf, len, (unsigned int) flags, to, tolen));
+inline int os::sendto(int fd, char* buf, size_t len, uint flags,
+                      struct sockaddr *to, socklen_t tolen) {
+  RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
 }
 
-inline int os::socket_shutdown(int fd, int howto){
+inline int os::socket_shutdown(int fd, int howto) {
   return ::shutdown(fd, howto);
 }
 
-inline int os::bind(int fd, struct sockaddr *him, int len){
+inline int os::bind(int fd, struct sockaddr* him, socklen_t len) {
   return ::bind(fd, him, len);
 }
 
-inline int os::get_sock_name(int fd, struct sockaddr *him, int *len){
-  return ::getsockname(fd, him, (socklen_t *)len);
+inline int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
+  return ::getsockname(fd, him, len);
 }
 
-inline int os::get_host_name(char* name, int namelen){
+inline int os::get_host_name(char* name, int namelen) {
   return ::gethostname(name, namelen);
 }
 
-inline struct hostent*  os::get_host_by_name(char* name) {
+inline struct hostent* os::get_host_by_name(char* name) {
   return ::gethostbyname(name);
 }
+
 inline int os::get_sock_opt(int fd, int level, int optname,
-                             char *optval, int* optlen){
-  return ::getsockopt(fd, level, optname, optval, (socklen_t *)optlen);
+                            char *optval, socklen_t* optlen) {
+  return ::getsockopt(fd, level, optname, optval, optlen);
 }
 
 inline int os::set_sock_opt(int fd, int level, int optname,
-                             const char *optval, int optlen){
+                            const char* optval, socklen_t optlen) {
   return ::setsockopt(fd, level, optname, optval, optlen);
 }
 #endif // OS_BSD_VM_OS_BSD_INLINE_HPP
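
With the wrappers now taking socklen_t directly, the (socklen_t*) casts disappear at call sites as well. An illustrative caller (hypothetical names, not code from this changeset):

    struct sockaddr_in addr;
    socklen_t len = sizeof(addr);   // previously an int, cast at each call
    int client_fd = os::accept(server_fd, (struct sockaddr*)&addr, &len);

The old int-based signatures only worked while int and socklen_t happened to share a size; matching the libc types removes that assumption.
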
--- a/hotspot/src/os/linux/vm/jvm_linux.h	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/os/linux/vm/jvm_linux.h	Fri Dec 23 15:24:43 2011 -0800
@@ -33,7 +33,6 @@
 // All local includes have been commented out.
 */
 
-
 #ifndef JVM_MD_H
 #define JVM_MD_H
 
@@ -44,6 +43,7 @@
 
 #include <dirent.h>             /* For DIR */
 #include <sys/param.h>          /* For MAXPATHLEN */
+#include <sys/socket.h>         /* For socklen_t */
 #include <unistd.h>             /* For F_OK, R_OK, W_OK */
 
 #define JNI_ONLOAD_SYMBOLS      {"JNI_OnLoad"}
@@ -95,8 +95,4 @@
 
 #endif /* JVM_MD_H */
 
-// Reconciliation History
-// jvm_solaris.h        1.6 99/06/22 16:38:47
-// End
-
 #endif // OS_LINUX_VM_JVM_LINUX_H
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -127,7 +127,6 @@
 
 // for timer info max values which include all bits
 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
-#define SEC_IN_NANOSECS  1000000000LL
 
 #define LARGEPAGES_BIT (1 << 6)
 ////////////////////////////////////////////////////////////////////////////////
@@ -3259,8 +3258,6 @@
 // generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
 // SIGSEGV, see 4355769.
 
-const int NANOSECS_PER_MILLISECS = 1000000;
-
 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
   assert(thread == Thread::current(),  "thread consistency check");
 
@@ -3283,7 +3280,7 @@
         // not a guarantee() because JVM should not abort on kernel/glibc bugs
         assert(!Linux::supports_monotonic_clock(), "time moving backwards");
       } else {
-        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISECS;
+        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
       }
 
       if(millis <= 0) {
@@ -3322,7 +3319,7 @@
         // not a guarantee() because JVM should not abort on kernel/glibc bugs
         assert(!Linux::supports_monotonic_clock(), "time moving backwards");
       } else {
-        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISECS;
+        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
       }
 
       if(millis <= 0) break ;
@@ -3924,7 +3921,7 @@
   int rc = os::Linux::clock_gettime(clockid, &tp);
   assert(rc == 0, "clock_gettime is expected to return 0 code");
 
-  return (tp.tv_sec * SEC_IN_NANOSECS) + tp.tv_nsec;
+  return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
 }
 
 /////
@@ -5165,9 +5162,6 @@
  * is no need to track notifications.
  */
 
-
-#define NANOSECS_PER_SEC 1000000000
-#define NANOSECS_PER_MILLISEC 1000000
 #define MAX_SECS 100000000
 /*
  * This code is common to linux and solaris and will be moved to a
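
The NANOSECS_PER_MILLISEC uses above sit inside os::sleep()'s time-remaining bookkeeping. In outline (a simplified sketch; the real loop also handles interruption and parking):

    jlong prevtime = javaTimeNanos();
    while (millis > 0) {
      // ... park/wait for up to 'millis' milliseconds ...
      jlong newtime = javaTimeNanos();
      if (newtime - prevtime < 0) {
        // time went backwards; tolerated only without a monotonic clock
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
      }
      prevtime = newtime;
    }
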
--- a/hotspot/src/os/linux/vm/os_linux.inline.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.inline.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -202,15 +202,15 @@
   return ::socket(domain, type, protocol);
 }
 
-inline int os::recv(int fd, char *buf, int nBytes, int flags) {
-  RESTARTABLE_RETURN_INT(::recv(fd, buf, nBytes, (unsigned int) flags));
+inline int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
+  RESTARTABLE_RETURN_INT(::recv(fd, buf, nBytes, flags));
 }
 
-inline int os::send(int fd, char *buf, int nBytes, int flags) {
-  RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, (unsigned int) flags));
+inline int os::send(int fd, char* buf, size_t nBytes, uint flags) {
+  RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
 }
 
-inline int os::raw_send(int fd, char *buf, int nBytes, int flags) {
+inline int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
   return os::send(fd, buf, nBytes, flags);
 }
 
@@ -250,57 +250,53 @@
   return ::listen(fd, count);
 }
 
-inline int os::connect(int fd, struct sockaddr *him, int len) {
+inline int os::connect(int fd, struct sockaddr* him, socklen_t len) {
   RESTARTABLE_RETURN_INT(::connect(fd, him, len));
 }
 
-inline int os::accept(int fd, struct sockaddr *him, int *len) {
-  // This cast is from int to unsigned int on linux.  Since we
-  // only pass the parameter "len" around the vm and don't try to
-  // fetch its value, this cast is safe for now. The java.net group
-  // may need and want to change this interface someday if socklen_t goes
-  // to 64 bits on some platform that we support.
-  // Linux doc says this can't return EINTR, unlike accept() on Solaris
-
-  return ::accept(fd, him, (socklen_t *)len);
+inline int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
+  // Linux doc says this can't return EINTR, unlike accept() on Solaris.
+  // But see attachListener_linux.cpp, LinuxAttachListener::dequeue().
+  return (int)::accept(fd, him, len);
 }
 
-inline int os::recvfrom(int fd, char *buf, int nBytes, int flags,
-                         sockaddr *from, int *fromlen) {
-  RESTARTABLE_RETURN_INT(::recvfrom(fd, buf, nBytes, (unsigned int) flags, from, (socklen_t *)fromlen));
+inline int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
+                        sockaddr* from, socklen_t* fromlen) {
+  RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
 }
 
-inline int os::sendto(int fd, char *buf, int len, int flags,
-                        struct sockaddr *to, int tolen) {
-  RESTARTABLE_RETURN_INT(::sendto(fd, buf, len, (unsigned int) flags, to, tolen));
+inline int os::sendto(int fd, char* buf, size_t len, uint flags,
+                      struct sockaddr* to, socklen_t tolen) {
+  RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
 }
 
-inline int os::socket_shutdown(int fd, int howto){
+inline int os::socket_shutdown(int fd, int howto) {
   return ::shutdown(fd, howto);
 }
 
-inline int os::bind(int fd, struct sockaddr *him, int len){
+inline int os::bind(int fd, struct sockaddr* him, socklen_t len) {
   return ::bind(fd, him, len);
 }
 
-inline int os::get_sock_name(int fd, struct sockaddr *him, int *len){
-  return ::getsockname(fd, him, (socklen_t *)len);
+inline int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
+  return ::getsockname(fd, him, len);
 }
 
-inline int os::get_host_name(char* name, int namelen){
+inline int os::get_host_name(char* name, int namelen) {
   return ::gethostname(name, namelen);
 }
 
-inline struct hostent*  os::get_host_by_name(char* name) {
+inline struct hostent* os::get_host_by_name(char* name) {
   return ::gethostbyname(name);
 }
+
 inline int os::get_sock_opt(int fd, int level, int optname,
-                             char *optval, int* optlen){
-  return ::getsockopt(fd, level, optname, optval, (socklen_t *)optlen);
+                            char* optval, socklen_t* optlen) {
+  return ::getsockopt(fd, level, optname, optval, optlen);
 }
 
 inline int os::set_sock_opt(int fd, int level, int optname,
-                             const char *optval, int optlen){
+                            const char* optval, socklen_t optlen) {
   return ::setsockopt(fd, level, optname, optval, optlen);
 }
 #endif // OS_LINUX_VM_OS_LINUX_INLINE_HPP
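
For reference, RESTARTABLE_RETURN_INT is the usual retry-on-EINTR idiom, defined earlier in this same file as:

    #define RESTARTABLE(_cmd, _result) do { \
        _result = _cmd; \
      } while(((int)_result == OS_ERR) && (errno == EINTR))

    #define RESTARTABLE_RETURN_INT(_cmd) do { \
      int _result; \
      RESTARTABLE(_cmd, _result); \
      return _result; \
    } while(false)
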
--- a/hotspot/src/os/solaris/vm/jvm_solaris.h	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/os/solaris/vm/jvm_solaris.h	Fri Dec 23 15:24:43 2011 -0800
@@ -33,7 +33,6 @@
 // All local includes have been commented out.
 */
 
-
 #ifndef JVM_MD_H
 #define JVM_MD_H
 
@@ -44,6 +43,7 @@
 
 #include <dirent.h>             /* For DIR */
 #include <sys/param.h>          /* For MAXPATHLEN */
+#include <sys/socket.h>         /* For socklen_t */
 #include <unistd.h>             /* For F_OK, R_OK, W_OK */
 #include <sys/int_types.h>      /* for intptr_t types (64 Bit cleanliness) */
 
@@ -82,7 +82,6 @@
 #define JVM_O_EXCL       O_EXCL
 #define JVM_O_CREAT      O_CREAT
 
-
 /* Signal definitions */
 
 #define BREAK_SIGNAL     SIGQUIT           /* Thread dumping support.    */
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -1674,7 +1674,6 @@
 }
 
 
-const int NANOSECS_PER_MILLISECS = 1000000;
 // gethrtime can move backwards if read from one cpu and then a different cpu
 // getTimeNanos is guaranteed to not move backward on Solaris
 // local spinloop created as faster for a CAS on an int than
@@ -1803,7 +1802,7 @@
 // getTimeMillis guaranteed to not move backwards on Solaris
 jlong getTimeMillis() {
   jlong nanotime = getTimeNanos();
-  return (jlong)(nanotime / NANOSECS_PER_MILLISECS);
+  return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
 }
 
 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
@@ -6064,10 +6063,7 @@
  * is no need to track notifications.
  */
 
-#define NANOSECS_PER_SEC 1000000000
-#define NANOSECS_PER_MILLISEC 1000000
 #define MAX_SECS 100000000
-
 /*
  * This code is common to linux and solaris and will be moved to a
  * common place in dolphin.
@@ -6363,17 +6359,16 @@
   RESTARTABLE_RETURN_INT(::close(fd));
 }
 
-int os::recv(int fd, char *buf, int nBytes, int flags) {
-  INTERRUPTIBLE_RETURN_INT(::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
-}
-
-
-int os::send(int fd, char *buf, int nBytes, int flags) {
-  INTERRUPTIBLE_RETURN_INT(::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
-}
-
-int os::raw_send(int fd, char *buf, int nBytes, int flags) {
-  RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
+int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
+  INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
+}
+
+int os::send(int fd, char* buf, size_t nBytes, uint flags) {
+  INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
+}
+
+int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
+  RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
 }
 
 // As both poll and select can be interrupted by signals, we have to be
@@ -6408,19 +6403,19 @@
   }
 }
 
-int os::connect(int fd, struct sockaddr *him, int len) {
+int os::connect(int fd, struct sockaddr *him, socklen_t len) {
   int _result;
-  INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,
+  INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
                           os::Solaris::clear_interrupted);
 
   // Depending on when thread interruption is reset, _result could be
   // one of two values when errno == EINTR
 
   if (((_result == OS_INTRPT) || (_result == OS_ERR))
-                                        && (errno == EINTR)) {
+      && (errno == EINTR)) {
      /* restarting a connect() changes its errno semantics */
-     INTERRUPTIBLE(::connect(fd, him, len), _result,
-                     os::Solaris::clear_interrupted);
+     INTERRUPTIBLE(::connect(fd, him, len), _result,\
+                   os::Solaris::clear_interrupted);
      /* undo these changes */
      if (_result == OS_ERR) {
        if (errno == EALREADY) {
@@ -6434,43 +6429,38 @@
    return _result;
  }
 
-int os::accept(int fd, struct sockaddr *him, int *len) {
-  if (fd < 0)
-   return OS_ERR;
-  INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him,\
-    (socklen_t*) len), os::Solaris::clear_interrupted);
- }
-
-int os::recvfrom(int fd, char *buf, int nBytes, int flags,
-                             sockaddr *from, int *fromlen) {
-   //%%note jvm_r11
-  INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes,\
-    flags, from, fromlen), os::Solaris::clear_interrupted);
-}
-
-int os::sendto(int fd, char *buf, int len, int flags,
-                           struct sockaddr *to, int tolen) {
-  //%%note jvm_r11
-  INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags,\
-    to, tolen), os::Solaris::clear_interrupted);
+int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
+  if (fd < 0) {
+    return OS_ERR;
+  }
+  INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
+                           os::Solaris::clear_interrupted);
+}
+
+int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
+                 sockaddr* from, socklen_t* fromlen) {
+  INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
+                           os::Solaris::clear_interrupted);
+}
+
+int os::sendto(int fd, char* buf, size_t len, uint flags,
+               struct sockaddr* to, socklen_t tolen) {
+  INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
+                           os::Solaris::clear_interrupted);
 }
 
 int os::socket_available(int fd, jint *pbytes) {
-   if (fd < 0)
-     return OS_OK;
-
-   int ret;
-
-   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
-
-   //%% note ioctl can return 0 when successful, JVM_SocketAvailable
-   // is expected to return 0 on failure and 1 on success to the jdk.
-
-   return (ret == OS_ERR) ? 0 : 1;
-}
-
-
-int os::bind(int fd, struct sockaddr *him, int len) {
+  if (fd < 0) {
+    return OS_OK;
+  }
+  int ret;
+  RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
+  // note: ioctl can return 0 when successful; JVM_SocketAvailable
+  // is expected to return 0 on failure and 1 on success to the jdk.
+  return (ret == OS_ERR) ? 0 : 1;
+}
+
+int os::bind(int fd, struct sockaddr* him, socklen_t len) {
    INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
-     os::Solaris::clear_interrupted);
-}
+                                      os::Solaris::clear_interrupted);
+}
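
The "restarting a connect() changes its errno semantics" comment refers to a POSIX quirk: calling connect() again after EINTR reports the connection's progress instead of retrying it. In outline (an illustrative sketch of what the wrapper above undoes, not the macro-expanded code):

    int rc = ::connect(fd, him, len);
    if (rc == -1 && errno == EINTR) {
      rc = ::connect(fd, him, len);            // restarted call
      if (rc == -1 && errno == EALREADY) {
        errno = EINTR;                         // still connecting: report as interrupted
      } else if (rc == -1 && errno == EISCONN) {
        rc = 0;                                // first attempt actually completed
      }
    }
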
--- a/hotspot/src/os/solaris/vm/os_solaris.inline.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/os/solaris/vm/os_solaris.inline.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -243,24 +243,25 @@
   return ::shutdown(fd, howto);
 }
 
-inline int os::get_sock_name(int fd, struct sockaddr *him, int *len){
-  return ::getsockname(fd, him, (socklen_t*) len);
+inline int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len){
+  return ::getsockname(fd, him, len);
 }
 
 inline int os::get_host_name(char* name, int namelen){
   return ::gethostname(name, namelen);
 }
 
-inline struct hostent*  os::get_host_by_name(char* name) {
+inline struct hostent* os::get_host_by_name(char* name) {
   return ::gethostbyname(name);
 }
+
 inline int os::get_sock_opt(int fd, int level, int optname,
-                             char *optval, int* optlen){
-  return ::getsockopt(fd, level, optname, optval, (socklen_t*) optlen);
+                            char* optval, socklen_t* optlen) {
+  return ::getsockopt(fd, level, optname, optval, optlen);
 }
 
 inline int os::set_sock_opt(int fd, int level, int optname,
-                             const char *optval, int optlen){
+                            const char *optval, socklen_t optlen) {
   return ::setsockopt(fd, level, optname, optval, optlen);
 }
 #endif // OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
--- a/hotspot/src/os/windows/vm/jvm_windows.h	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/os/windows/vm/jvm_windows.h	Fri Dec 23 15:24:43 2011 -0800
@@ -22,6 +22,9 @@
  *
  */
 
+#ifndef OS_WINDOWS_VM_JVM_WINDOWS_H
+#define OS_WINDOWS_VM_JVM_WINDOWS_H
+
 #ifndef _JAVASOFT_JVM_MD_H_
 #define _JAVASOFT_JVM_MD_H_
 
@@ -54,9 +57,9 @@
 #include <Psapi.h>
 #endif
 
-
+#include <Tlhelp32.h>
 
-#include <Tlhelp32.h>
+typedef unsigned int socklen_t;
 
 // #include "jni.h"
 
@@ -129,3 +132,5 @@
 #define SHUTDOWN2_SIGNAL SIGTERM
 
 #endif /* !_JAVASOFT_JVM_MD_H_ */
+
+#endif // OS_WINDOWS_VM_JVM_WINDOWS_H
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -821,17 +821,15 @@
   }
 }
 
-#define NANOS_PER_SEC         CONST64(1000000000)
-#define NANOS_PER_MILLISEC    1000000
 jlong os::javaTimeNanos() {
   if (!has_performance_count) {
-    return javaTimeMillis() * NANOS_PER_MILLISEC; // the best we can do.
+    return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do.
   } else {
     LARGE_INTEGER current_count;
     QueryPerformanceCounter(&current_count);
     double current = as_long(current_count);
     double freq = performance_frequency;
-    jlong time = (jlong)((current/freq) * NANOS_PER_SEC);
+    jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
     return time;
   }
 }
@@ -847,15 +845,15 @@
     info_ptr->may_skip_forward = true;
   } else {
     jlong freq = performance_frequency;
-    if (freq < NANOS_PER_SEC) {
+    if (freq < NANOSECS_PER_SEC) {
       // the performance counter is 64 bits and we will
       // be multiplying it -- so no wrap in 64 bits
       info_ptr->max_value = ALL_64_BITS;
-    } else if (freq > NANOS_PER_SEC) {
+    } else if (freq > NANOSECS_PER_SEC) {
       // use the max value the counter can reach to
       // determine the max value which could be returned
       julong max_counter = (julong)ALL_64_BITS;
-      info_ptr->max_value = (jlong)(max_counter / (freq / NANOS_PER_SEC));
+      info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
     } else {
       // the performance counter is 64 bits and we will
       // be using it directly -- so no wrap in 64 bits
@@ -4851,7 +4849,7 @@
   ::mutexUnlock(&sockFnTableMutex);
 }
 
-struct hostent*  os::get_host_by_name(char* name) {
+struct hostent* os::get_host_by_name(char* name) {
   if (!sock_initialized) {
     initSock();
   }
@@ -4882,39 +4880,39 @@
   return 0;
 }
 
-int os::connect(int fd, struct sockaddr *him, int len) {
+int os::connect(int fd, struct sockaddr* him, socklen_t len) {
   ShouldNotReachHere();
   return 0;
 }
 
-int os::accept(int fd, struct sockaddr *him, int *len) {
+int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
   ShouldNotReachHere();
   return 0;
 }
 
-int os::sendto(int fd, char *buf, int len, int flags,
-                        struct sockaddr *to, int tolen) {
+int os::sendto(int fd, char* buf, size_t len, uint flags,
+               struct sockaddr* to, socklen_t tolen) {
   ShouldNotReachHere();
   return 0;
 }
 
-int os::recvfrom(int fd, char *buf, int nBytes, int flags,
-                         sockaddr *from, int *fromlen) {
+int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
+                 sockaddr* from, socklen_t* fromlen) {
   ShouldNotReachHere();
   return 0;
 }
 
-int os::recv(int fd, char *buf, int nBytes, int flags) {
+int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
   ShouldNotReachHere();
   return 0;
 }
 
-int os::send(int fd, char *buf, int nBytes, int flags) {
+int os::send(int fd, char* buf, size_t nBytes, uint flags) {
   ShouldNotReachHere();
   return 0;
 }
 
-int os::raw_send(int fd, char *buf, int nBytes, int flags) {
+int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
   ShouldNotReachHere();
   return 0;
 }
@@ -4934,24 +4932,24 @@
   return 0;
 }
 
-int os::bind(int fd, struct sockaddr *him, int len) {
+int os::bind(int fd, struct sockaddr* him, socklen_t len) {
   ShouldNotReachHere();
   return 0;
 }
 
-int os::get_sock_name(int fd, struct sockaddr *him, int *len) {
+int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
   ShouldNotReachHere();
   return 0;
 }
 
 int os::get_sock_opt(int fd, int level, int optname,
-                             char *optval, int* optlen) {
+                     char* optval, socklen_t* optlen) {
   ShouldNotReachHere();
   return 0;
 }
 
 int os::set_sock_opt(int fd, int level, int optname,
-                             const char *optval, int optlen) {
+                     const char* optval, socklen_t optlen) {
   ShouldNotReachHere();
   return 0;
 }
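
On Windows, os::javaTimeNanos() derives nanoseconds from the performance counter, which is what the renamed constants feed. The conversion has roughly this shape (simplified; the real code caches the frequency in performance_frequency):

    LARGE_INTEGER count;
    LARGE_INTEGER freq;
    QueryPerformanceFrequency(&freq);   // counter ticks per second
    QueryPerformanceCounter(&count);
    double seconds = (double)count.QuadPart / (double)freq.QuadPart;
    jlong nanos = (jlong)(seconds * NANOSECS_PER_SEC);
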
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -336,12 +336,6 @@
                      unallocated_block() : end());
   }
 
-  // This is needed because the default implementation uses block_start()
-  // which can't be used at certain times (for example phase 3 of mark-sweep).
-  // A better fix is to change the assertions in phase 3 of mark-sweep to
-  // use is_in_reserved(), but that is deferred since the is_in() assertions
-  // are buried through several layers of callers and are used elsewhere
-  // as well.
   bool is_in(const void* p) const {
     return used_region().contains(p);
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -1117,12 +1117,9 @@
 
 // Calculates the number of active workers for a concurrent
 // phase.
-int ConcurrentMark::calc_parallel_marking_threads() {
-
-  size_t n_conc_workers;
-  if (!G1CollectedHeap::use_parallel_gc_threads()) {
-    n_conc_workers = 1;
-  } else {
+size_t ConcurrentMark::calc_parallel_marking_threads() {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
+    size_t n_conc_workers = 0;
     if (!UseDynamicNumberOfGCThreads ||
         (!FLAG_IS_DEFAULT(ConcGCThreads) &&
          !ForceDynamicNumberOfGCThreads)) {
@@ -1137,9 +1134,13 @@
       // Don't scale down "n_conc_workers" by scale_parallel_threads() because
       // that scaling has already gone into "_max_parallel_marking_threads".
     }
+    assert(n_conc_workers > 0, "Always need at least 1");
+    return n_conc_workers;
   }
-  assert(n_conc_workers > 0, "Always need at least 1");
-  return (int) MAX2(n_conc_workers, (size_t) 1);
+  // If we are not running with any parallel GC threads we will not
+  // have spawned any marking threads either. Hence the number of
+  // concurrent workers should be 0.
+  return 0;
 }
 
 void ConcurrentMark::markFromRoots() {
@@ -1151,24 +1152,24 @@
   // stop-the-world GC happens even as we mark in this generation.
 
   _restart_for_overflow = false;
-
-  // Parallel task terminator is set in "set_phase()".
   force_overflow_conc()->init();
 
   // _g1h has _n_par_threads
-
   _parallel_marking_threads = calc_parallel_marking_threads();
   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
     "Maximum number of marking threads exceeded");
-  _parallel_workers->set_active_workers((int)_parallel_marking_threads);
-  // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
-  // and the decisions on that MT processing is made elsewhere.
-
-  assert( _parallel_workers->active_workers() > 0, "Should have been set");
-  set_phase(_parallel_workers->active_workers(), true /* concurrent */);
+
+  size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
+
+  // Parallel task terminator is set in "set_phase()"
+  set_phase(active_workers, true /* concurrent */);
 
   CMConcurrentMarkingTask markingTask(this, cmThread());
   if (parallel_marking_threads() > 0) {
+    _parallel_workers->set_active_workers((int)active_workers);
+    // Don't set _n_par_threads because it affects MT in process_strong_roots()
+    // and the decisions on that MT processing are made elsewhere.
+    assert(_parallel_workers->active_workers() > 0, "Should have been set");
     _parallel_workers->run_task(&markingTask);
   } else {
     markingTask.work(0);
@@ -1765,8 +1766,7 @@
 
   HeapRegionRemSet::reset_for_cleanup_tasks();
 
-  g1h->set_par_threads();
-  size_t n_workers = g1h->n_par_threads();
+  size_t n_workers;
 
   // Do counting once more with the world stopped for good measure.
   G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
@@ -1776,8 +1776,10 @@
                                                HeapRegion::InitialClaimValue),
            "sanity check");
 
+    g1h->set_par_threads();
+    n_workers = g1h->n_par_threads();
     assert(g1h->n_par_threads() == (int) n_workers,
-      "Should not have been reset");
+           "Should not have been reset");
     g1h->workers()->run_task(&g1_par_count_task);
     // Done with the parallel phase so reset to 0.
     g1h->set_par_threads(0);
@@ -1786,6 +1788,7 @@
                                              HeapRegion::FinalCountClaimValue),
            "sanity check");
   } else {
+    n_workers = 1;
     g1_par_count_task.work(0);
   }
 
@@ -1851,7 +1854,6 @@
                            (note_end_end - note_end_start)*1000.0);
   }
 
-
   // call below, since it affects the metric by which we sort the heap
   // regions.
   if (G1ScrubRemSets) {
@@ -2329,9 +2331,9 @@
     }
   }
 
-  CMRemarkTask(ConcurrentMark* cm) :
+  CMRemarkTask(ConcurrentMark* cm, int active_workers) :
     AbstractGangTask("Par Remark"), _cm(cm) {
-    _cm->terminator()->reset_for_reuse(cm->_g1h->workers()->active_workers());
+    _cm->terminator()->reset_for_reuse(active_workers);
   }
 };
 
@@ -2357,7 +2359,7 @@
     // constructor and pass values of the active workers
     // through the gang in the task.
 
-    CMRemarkTask remarkTask(this);
+    CMRemarkTask remarkTask(this, active_workers);
     g1h->set_par_threads(active_workers);
     g1h->workers()->run_task(&remarkTask);
     g1h->set_par_threads(0);
@@ -2367,7 +2369,7 @@
     int active_workers = 1;
     set_phase(active_workers, false /* concurrent */);
 
-    CMRemarkTask remarkTask(this);
+    CMRemarkTask remarkTask(this, active_workers);
     // We will start all available threads, even if we decide that the
     // active_workers will be fewer. The extra ones will just bail out
     // immediately.
@@ -3123,13 +3125,12 @@
   }
 
   double start = os::elapsedTime();
-  int n_workers = g1h->workers()->total_workers();
-
   G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this);
 
   assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
 
   if (G1CollectedHeap::use_parallel_gc_threads()) {
+    int n_workers = g1h->workers()->active_workers();
     g1h->set_par_threads(n_workers);
     g1h->workers()->run_task(&complete_mark_task);
     g1h->set_par_threads(0);
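
Net effect of the signature change: calc_parallel_marking_threads() now reports 0 concurrent workers whenever parallel GC threads are off, and each caller clamps where a worker is actually needed. The resulting pattern in markFromRoots(), condensed from the hunks above:

    _parallel_marking_threads = calc_parallel_marking_threads();   // 0 in the serial case
    size_t active_workers = MAX2((size_t)1, parallel_marking_threads());
    set_phase(active_workers, true /* concurrent */);              // sizes the terminator
    if (parallel_marking_threads() > 0) {
      _parallel_workers->run_task(&markingTask);                   // parallel path
    } else {
      markingTask.work(0);                                         // serial path
    }
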
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -718,7 +718,7 @@
   size_t scale_parallel_threads(size_t n_par_threads);
 
   // Calculates the number of GC threads to be used in a concurrent phase.
-  int calc_parallel_marking_threads();
+  size_t calc_parallel_marking_threads();
 
   // The following three are interaction between CM and
   // G1CollectedHeap
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -1294,7 +1294,7 @@
     g1_policy()->stop_incremental_cset_building();
 
     tear_down_region_sets(false /* free_list_only */);
-    g1_policy()->set_full_young_gcs(true);
+    g1_policy()->set_gcs_are_young(true);
 
     // See the comments in g1CollectedHeap.hpp and
     // G1CollectedHeap::ref_processing_init() about
@@ -1842,7 +1842,9 @@
   _full_collections_completed(0),
   _in_cset_fast_test(NULL),
   _in_cset_fast_test_base(NULL),
-  _dirty_cards_region_list(NULL) {
+  _dirty_cards_region_list(NULL),
+  _worker_cset_start_region(NULL),
+  _worker_cset_start_region_time_stamp(NULL) {
   _g1h = this; // To catch bugs.
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
     vm_exit_during_initialization("Failed necessary allocation.");
@@ -1863,12 +1865,17 @@
   }
   _rem_set_iterator = iter_arr;
 
+  _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues);
+  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues);
+
   for (int i = 0; i < n_queues; i++) {
     RefToScanQueue* q = new RefToScanQueue();
     q->initialize();
     _task_queues->register_queue(i, q);
   }
 
+  clear_cset_start_regions();
+
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 }
 
@@ -2411,8 +2418,11 @@
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) p);
-  if (hr != NULL) {
+  if (_g1_committed.contains(p)) {
+    // Given that we know that p is in the committed space,
+    // heap_region_containing_raw() should successfully
+    // return the containing region.
+    HeapRegion* hr = heap_region_containing_raw(p);
     return hr->is_in(p);
   } else {
     return _perm_gen->as_gen()->is_in(p);
@@ -2684,25 +2694,80 @@
 }
 #endif // ASSERT
 
-// We want the parallel threads to start their collection
-// set iteration at different collection set regions to
-// avoid contention.
-// If we have:
-//          n collection set regions
-//          p threads
-// Then thread t will start at region t * floor (n/p)
-
+// Clear the cached CSet starting regions and (more importantly)
+// the time stamps. Called when we reset the GC time stamp.
+void G1CollectedHeap::clear_cset_start_regions() {
+  assert(_worker_cset_start_region != NULL, "sanity");
+  assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
+
+  int n_queues = MAX2((int)ParallelGCThreads, 1);
+  for (int i = 0; i < n_queues; i++) {
+    _worker_cset_start_region[i] = NULL;
+    _worker_cset_start_region_time_stamp[i] = 0;
+  }
+}
+
+// Given the id of a worker, obtain or calculate a suitable
+// starting region for iterating over the current collection set.
 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
-  HeapRegion* result = g1_policy()->collection_set();
+  assert(get_gc_time_stamp() > 0, "should have been updated by now");
+
+  HeapRegion* result = NULL;
+  unsigned gc_time_stamp = get_gc_time_stamp();
+
+  if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
+    // Cached starting region for current worker was set
+    // during the current pause - so it's valid.
+    // Note: the cached starting heap region may be NULL
+    // (when the collection set is empty).
+    result = _worker_cset_start_region[worker_i];
+    assert(result == NULL || result->in_collection_set(), "sanity");
+    return result;
+  }
+
+  // The cached entry was not valid so let's calculate
+  // a suitable starting heap region for this worker.
+
+  // We want the parallel threads to start their collection
+  // set iteration at different collection set regions to
+  // avoid contention.
+  // If we have:
+  //          n collection set regions
+  //          p threads
+  // Then thread t will start at region floor ((t * n) / p)
+
+  result = g1_policy()->collection_set();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     size_t cs_size = g1_policy()->cset_region_length();
-    int n_workers = workers()->total_workers();
-    size_t cs_spans = cs_size / n_workers;
-    size_t ind      = cs_spans * worker_i;
-    for (size_t i = 0; i < ind; i++) {
+    int active_workers = workers()->active_workers();
+    assert(UseDynamicNumberOfGCThreads ||
+             active_workers == workers()->total_workers(),
+             "Unless dynamic should use total workers");
+
+    size_t end_ind   = (cs_size * worker_i) / active_workers;
+    size_t start_ind = 0;
+
+    if (worker_i > 0 &&
+        _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
+      // Previous worker's starting region is valid
+      // so let's iterate from there
+      start_ind = (cs_size * (worker_i - 1)) / active_workers;
+      result = _worker_cset_start_region[worker_i - 1];
+    }
+
+    for (size_t i = start_ind; i < end_ind; i++) {
       result = result->next_in_collection_set();
     }
   }
+
+  // Note: the calculated starting heap region may be NULL
+  // (when the collection set is empty).
+  assert(result == NULL || result->in_collection_set(), "sanity");
+  assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
+         "should be updated only once per pause");
+  _worker_cset_start_region[worker_i] = result;
+  OrderAccess::storestore();
+  _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
   return result;
 }
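
The switch from "region t * floor(n/p)" to "region floor((t * n) / p)" spreads the remainder regions evenly across workers instead of leaving them all in the last worker's span. A quick standalone check with example values (not VM code):

    #include <cstdio>

    int main() {
      const int n = 10;   // collection set regions
      const int p = 4;    // worker threads
      for (int t = 0; t < p; t++) {
        printf("worker %d: old start %d, new start %d\n",
               t, t * (n / p), (t * n) / p);
      }
      // old starts: 0 2 4 6  -> spans of 2, 2, 2, 4 regions
      // new starts: 0 2 5 7  -> spans of 2, 3, 2, 3 regions
      return 0;
    }
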
 
@@ -3461,20 +3526,19 @@
     // for the duration of this pause.
     g1_policy()->decide_on_conc_mark_initiation();
 
-    // We do not allow initial-mark to be piggy-backed on a
-    // partially-young GC.
+    // We do not allow initial-mark to be piggy-backed on a mixed GC.
     assert(!g1_policy()->during_initial_mark_pause() ||
-            g1_policy()->full_young_gcs(), "sanity");
-
-    // We also do not allow partially-young GCs during marking.
-    assert(!mark_in_progress() || g1_policy()->full_young_gcs(), "sanity");
+            g1_policy()->gcs_are_young(), "sanity");
+
+    // We also do not allow mixed GCs during marking.
+    assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
 
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
-    if (g1_policy()->full_young_gcs()) {
+    if (g1_policy()->gcs_are_young()) {
       strcat(verbose_str, "(young)");
     } else {
-      strcat(verbose_str, "(partial)");
+      strcat(verbose_str, "(mixed)");
     }
     if (g1_policy()->during_initial_mark_pause()) {
       strcat(verbose_str, " (initial-mark)");
@@ -3723,8 +3787,9 @@
         double end_time_sec = os::elapsedTime();
         double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
         g1_policy()->record_pause_time_ms(pause_time_ms);
-        int active_gc_threads = workers()->active_workers();
-        g1_policy()->record_collection_pause_end(active_gc_threads);
+        int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                                workers()->active_workers() : 1);
+        g1_policy()->record_collection_pause_end(active_workers);
 
         MemoryService::track_memory_usage();
 
@@ -5248,8 +5313,10 @@
   int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                         workers()->active_workers() : 1);
 
-  assert(active_workers == workers()->active_workers(),
-         "Need to reset active_workers");
+  assert(!G1CollectedHeap::use_parallel_gc_threads() ||
+           active_workers == workers()->active_workers(),
+           "Need to reset active_workers");
+
   set_par_threads(active_workers);
   G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
 
@@ -5387,13 +5454,13 @@
     assert(UseDynamicNumberOfGCThreads ||
            n_workers == workers()->total_workers(),
            "If not dynamic should be using all the  workers");
+    workers()->set_active_workers(n_workers);
     set_par_threads(n_workers);
   } else {
     assert(n_par_threads() == 0,
            "Should be the original non-parallel value");
     n_workers = 1;
   }
-  workers()->set_active_workers(n_workers);
 
   G1ParTask g1_par_task(this, _task_queues);
 
@@ -5415,6 +5482,7 @@
     workers()->run_task(&g1_par_task);
   } else {
     StrongRootsScope srs(this);
+    g1_par_task.set_for_termination(n_workers);
     g1_par_task.work(0);
   }
 
@@ -5663,8 +5731,8 @@
     // Iterate over the dirty cards region list.
     G1ParCleanupCTTask cleanup_task(ct_bs, this);
 
-    if (ParallelGCThreads > 0) {
-      set_par_threads(workers()->total_workers());
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      set_par_threads();
       workers()->run_task(&cleanup_task);
       set_par_threads(0);
     } else {
@@ -6072,8 +6140,9 @@
 void G1CollectedHeap::set_par_threads() {
   // Don't change the number of workers.  Use the value previously set
   // in the workgroup.
+  assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
   int n_workers = workers()->active_workers();
-    assert(UseDynamicNumberOfGCThreads ||
+  assert(UseDynamicNumberOfGCThreads ||
            n_workers == workers()->total_workers(),
       "Otherwise should be using the total number of workers");
   if (n_workers == 0) {
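
The per-worker starting-region cache added above is invalidated by time stamp rather than by clearing it on every pause; the writer's store order is the load-bearing detail (as in the diff):

    _worker_cset_start_region[worker_i] = result;                    // payload first
    OrderAccess::storestore();                                       // order the two stores
    _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;  // stamp last
    // A reader that sees a stamp equal to the current GC time stamp is
    // therefore guaranteed to also see the region stored before it.
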
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -943,6 +943,16 @@
   // discovery.
   G1CMIsAliveClosure _is_alive_closure_cm;
 
+  // Cache used by G1CollectedHeap::start_cset_region_for_worker().
+  HeapRegion** _worker_cset_start_region;
+
+  // Time stamp to validate the regions recorded in the cache
+  // used by G1CollectedHeap::start_cset_region_for_worker().
+  // The heap region entry for a given worker is valid iff
+  // the associated time stamp value matches the current value
+  // of G1CollectedHeap::_gc_time_stamp.
+  unsigned int* _worker_cset_start_region_time_stamp;
+
   enum G1H_process_strong_roots_tasks {
     G1H_PS_mark_stack_oops_do,
     G1H_PS_refProcessor_oops_do,
@@ -1030,6 +1040,9 @@
   void reset_gc_time_stamp() {
     _gc_time_stamp = 0;
     OrderAccess::fence();
+    // Clear the cached CSet starting regions and time stamps.
+    // Their validity is dependent on the GC timestamp.
+    clear_cset_start_regions();
   }
 
   void increment_gc_time_stamp() {
@@ -1196,7 +1209,7 @@
                                        HumongousRegionSet* humongous_proxy_set,
                                        bool par);
 
-  // Returns "TRUE" iff "p" points into the allocated area of the heap.
+  // Returns "TRUE" iff "p" points into the committed areas of the heap.
   virtual bool is_in(const void* p) const;
 
   // Return "TRUE" iff the given object address is within the collection
@@ -1300,9 +1313,12 @@
   bool check_cset_heap_region_claim_values(jint claim_value);
 #endif // ASSERT
 
-  // Given the id of a worker, calculate a suitable
-  // starting region for iterating over the current
-  // collection set.
+  // Clear the cached cset start regions and (more importantly)
+  // the time stamps. Called when we reset the GC time stamp.
+  void clear_cset_start_regions();
+
+  // Given the id of a worker, obtain or calculate a suitable
+  // starting region for iterating over the current collection set.
   HeapRegion* start_cset_region_for_worker(int worker_i);
 
   // Iterate over the regions (if any) in the current collection set.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -50,7 +50,7 @@
 };
 
 // all the same
-static double fully_young_cards_per_entry_ratio_defaults[] = {
+static double young_cards_per_entry_ratio_defaults[] = {
   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
 };
 
@@ -168,11 +168,10 @@
   _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _partially_young_cards_per_entry_ratio_seq(
-                                         new TruncatedSeq(TruncatedSeqLength)),
+  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
@@ -185,9 +184,9 @@
 
   _pause_time_target_ms((double) MaxGCPauseMillis),
 
-  _full_young_gcs(true),
-  _full_young_pause_num(0),
-  _partial_young_pause_num(0),
+  _gcs_are_young(true),
+  _young_pause_num(0),
+  _mixed_pause_num(0),
 
   _during_marking(false),
   _in_marking_window(false),
@@ -198,7 +197,8 @@
 
   _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
 
-   _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
+  _recent_prev_end_times_for_all_gcs_sec(
+                                new TruncatedSeq(NumPrevPausesForHeuristics)),
 
   _recent_avg_pause_time_ratio(0.0),
 
@@ -206,8 +206,9 @@
 
   _initiate_conc_mark_if_possible(false),
   _during_initial_mark_pause(false),
-  _should_revert_to_full_young_gcs(false),
-  _last_full_young_gc(false),
+  _should_revert_to_young_gcs(false),
+  _last_young_gc(false),
+  _last_gc_was_young(false),
 
   _eden_bytes_before_gc(0),
   _survivor_bytes_before_gc(0),
@@ -308,8 +309,8 @@
   _pending_card_diff_seq->add(0.0);
   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
-  _fully_young_cards_per_entry_ratio_seq->add(
-                            fully_young_cards_per_entry_ratio_defaults[index]);
+  _young_cards_per_entry_ratio_seq->add(
+                                  young_cards_per_entry_ratio_defaults[index]);
   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
@@ -606,7 +607,7 @@
 
   size_t young_list_target_length = 0;
   if (adaptive_young_list_length()) {
-    if (full_young_gcs()) {
+    if (gcs_are_young()) {
       young_list_target_length =
                         calculate_young_list_target_length(rs_lengths,
                                                            base_min_length,
@@ -619,10 +620,10 @@
       // possible to maximize how many old regions we can add to it.
     }
   } else {
-    if (full_young_gcs()) {
+    if (gcs_are_young()) {
       young_list_target_length = _young_list_fixed_length;
     } else {
-      // A bit arbitrary: during partially-young GCs we allocate half
+      // A bit arbitrary: during mixed GCs we allocate half
       // the young regions to try to add old regions to the CSet.
       young_list_target_length = _young_list_fixed_length / 2;
       // We choose to accept that we might go under the desired min
@@ -655,7 +656,7 @@
                                                    size_t desired_min_length,
                                                    size_t desired_max_length) {
   assert(adaptive_young_list_length(), "pre-condition");
-  assert(full_young_gcs(), "only call this for fully-young GCs");
+  assert(gcs_are_young(), "only call this for young GCs");
 
   // In case some edge-condition makes the desired max length too small...
   if (desired_max_length <= desired_min_length) {
@@ -858,12 +859,11 @@
 
   _g1->clear_full_collection();
 
-  // "Nuke" the heuristics that control the fully/partially young GC
-  // transitions and make sure we start with fully young GCs after the
-  // Full GC.
-  set_full_young_gcs(true);
-  _last_full_young_gc = false;
-  _should_revert_to_full_young_gcs = false;
+  // "Nuke" the heuristics that control the young/mixed GC
+  // transitions and make sure we start with young GCs after the Full GC.
+  set_gcs_are_young(true);
+  _last_young_gc = false;
+  _should_revert_to_young_gcs = false;
   clear_initiate_conc_mark_if_possible();
   clear_during_initial_mark_pause();
   _known_garbage_bytes = 0;
@@ -892,7 +892,7 @@
   if (PrintGCDetails) {
     gclog_or_tty->stamp(PrintGCTimeStamps);
     gclog_or_tty->print("[GC pause");
-    gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
+    gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
   }
 
   // We only need to do this here as the policy will only be applied
@@ -951,7 +951,7 @@
   // the evacuation pause if marking is in progress.
   _cur_satb_drain_time_ms = 0.0;
 
-  _last_young_gc_full = false;
+  _last_gc_was_young = false;
 
   // do that for any other surv rate groups
   _short_lived_surv_rate_group->stop_adding_regions();
@@ -988,8 +988,8 @@
 }
 
 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
-  _should_revert_to_full_young_gcs = false;
-  _last_full_young_gc = true;
+  _should_revert_to_young_gcs = false;
+  _last_young_gc = true;
   _in_marking_window = false;
 }
 
@@ -1153,7 +1153,7 @@
   size_t marking_initiating_used_threshold =
     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
 
-  if (!_g1->mark_in_progress() && !_last_full_young_gc) {
+  if (!_g1->mark_in_progress() && !_last_young_gc) {
     assert(!last_pause_included_initial_mark, "invariant");
     if (cur_used_bytes > marking_initiating_used_threshold) {
       if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
@@ -1458,57 +1458,57 @@
     new_in_marking_window_im = true;
   }
 
-  if (_last_full_young_gc) {
+  if (_last_young_gc) {
     if (!last_pause_included_initial_mark) {
-      ergo_verbose2(ErgoPartiallyYoungGCs,
-                    "start partially-young GCs",
+      ergo_verbose2(ErgoMixedGCs,
+                    "start mixed GCs",
                     ergo_format_byte_perc("known garbage"),
                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
-      set_full_young_gcs(false);
+      set_gcs_are_young(false);
     } else {
-      ergo_verbose0(ErgoPartiallyYoungGCs,
-                    "do not start partially-young GCs",
+      ergo_verbose0(ErgoMixedGCs,
+                    "do not start mixed GCs",
                     ergo_format_reason("concurrent cycle is about to start"));
     }
-    _last_full_young_gc = false;
+    _last_young_gc = false;
   }
 
-  if ( !_last_young_gc_full ) {
-    if (_should_revert_to_full_young_gcs) {
-      ergo_verbose2(ErgoPartiallyYoungGCs,
-                    "end partially-young GCs",
-                    ergo_format_reason("partially-young GCs end requested")
+  if (!_last_gc_was_young) {
+    if (_should_revert_to_young_gcs) {
+      ergo_verbose2(ErgoMixedGCs,
+                    "end mixed GCs",
+                    ergo_format_reason("mixed GCs end requested")
                     ergo_format_byte_perc("known garbage"),
                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
-      set_full_young_gcs(true);
+      set_gcs_are_young(true);
     } else if (_known_garbage_ratio < 0.05) {
-      ergo_verbose3(ErgoPartiallyYoungGCs,
-               "end partially-young GCs",
+      ergo_verbose3(ErgoMixedGCs,
+               "end mixed GCs",
                ergo_format_reason("known garbage percent lower than threshold")
                ergo_format_byte_perc("known garbage")
                ergo_format_perc("threshold"),
                _known_garbage_bytes, _known_garbage_ratio * 100.0,
                0.05 * 100.0);
-      set_full_young_gcs(true);
+      set_gcs_are_young(true);
     } else if (adaptive_young_list_length() &&
               (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
-      ergo_verbose5(ErgoPartiallyYoungGCs,
-                    "end partially-young GCs",
+      ergo_verbose5(ErgoMixedGCs,
+                    "end mixed GCs",
                     ergo_format_reason("current GC efficiency lower than "
-                                       "predicted fully-young GC efficiency")
+                                       "predicted young GC efficiency")
                     ergo_format_double("GC efficiency factor")
                     ergo_format_double("current GC efficiency")
-                    ergo_format_double("predicted fully-young GC efficiency")
+                    ergo_format_double("predicted young GC efficiency")
                     ergo_format_byte_perc("known garbage"),
                     get_gc_eff_factor(), cur_efficiency,
                     predict_young_gc_eff(),
                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
-      set_full_young_gcs(true);
+      set_gcs_are_young(true);
     }
   }
-  _should_revert_to_full_young_gcs = false;
-
-  if (_last_young_gc_full && !_during_marking) {
+  _should_revert_to_young_gcs = false;
+
+  if (_last_gc_was_young && !_during_marking) {
     _young_gc_eff_seq->add(cur_efficiency);
   }
 
@@ -1534,19 +1534,21 @@
     double cost_per_entry_ms = 0.0;
     if (cards_scanned > 10) {
       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
-      if (_last_young_gc_full)
+      if (_last_gc_was_young) {
         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
-      else
-        _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
+      } else {
+        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
+      }
     }
 
     if (_max_rs_lengths > 0) {
       double cards_per_entry_ratio =
         (double) cards_scanned / (double) _max_rs_lengths;
-      if (_last_young_gc_full)
-        _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
-      else
-        _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
+      if (_last_gc_was_young) {
+        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
+      } else {
+        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
+      }
     }
 
     // It turns out that, sometimes, _max_rs_lengths can get smaller
@@ -1563,10 +1565,11 @@
     double cost_per_byte_ms = 0.0;
     if (copied_bytes > 0) {
       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
-      if (_in_marking_window)
+      if (_in_marking_window) {
         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
-      else
+      } else {
         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
+      }
     }
 
     double all_other_time_ms = pause_time_ms -
@@ -1722,10 +1725,11 @@
   size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
                       predict_rs_length_diff();
   size_t card_num;
-  if (full_young_gcs())
+  if (gcs_are_young()) {
     card_num = predict_young_card_num(rs_lengths);
-  else
+  } else {
     card_num = predict_non_young_card_num(rs_lengths);
+  }
   size_t young_byte_size = young_num * HeapRegion::GrainBytes;
   double accum_yg_surv_rate =
     _short_lived_surv_rate_group->accum_surv_rate(adjustment);
@@ -1745,10 +1749,11 @@
 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
   size_t rs_length = predict_rs_length_diff();
   size_t card_num;
-  if (full_young_gcs())
+  if (gcs_are_young()) {
     card_num = predict_young_card_num(rs_length);
-  else
+  } else {
     card_num = predict_non_young_card_num(rs_length);
+  }
   return predict_base_elapsed_time_ms(pending_cards, card_num);
 }
 
@@ -1766,10 +1771,11 @@
                                                   bool young) {
   size_t rs_length = hr->rem_set()->occupied();
   size_t card_num;
-  if (full_young_gcs())
+  if (gcs_are_young()) {
     card_num = predict_young_card_num(rs_length);
-  else
+  } else {
     card_num = predict_non_young_card_num(rs_length);
+  }
   size_t bytes_to_copy = predict_bytes_to_copy(hr);
 
   double region_elapsed_time_ms =
@@ -1817,14 +1823,14 @@
   // I don't think we need to do this when in young GC mode since
   // marking will be initiated next time we hit the soft limit anyway...
   if (predicted_time_ms > _expensive_region_limit_ms) {
-    ergo_verbose2(ErgoPartiallyYoungGCs,
-              "request partially-young GCs end",
+    ergo_verbose2(ErgoMixedGCs,
+              "request mixed GCs end",
               ergo_format_reason("predicted region time higher than threshold")
               ergo_format_ms("predicted region time")
               ergo_format_ms("threshold"),
               predicted_time_ms, _expensive_region_limit_ms);
-    // no point in doing another partial one
-    _should_revert_to_full_young_gcs = true;
+    // no point in doing another mixed GC
+    _should_revert_to_young_gcs = true;
   }
 }
 
@@ -2033,8 +2039,8 @@
     print_summary_sd(0, "Total", _all_pause_times_ms);
     gclog_or_tty->print_cr("");
     gclog_or_tty->print_cr("");
-    gclog_or_tty->print_cr("   Full Young GC Pauses:    %8d", _full_young_pause_num);
-    gclog_or_tty->print_cr("   Partial Young GC Pauses: %8d", _partial_young_pause_num);
+    gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
+    gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
     gclog_or_tty->print_cr("");
 
     gclog_or_tty->print_cr("EVACUATION PAUSES");
@@ -2188,11 +2194,11 @@
       // initiate a new cycle.
 
       set_during_initial_mark_pause();
-      // We do not allow non-full young GCs during marking.
-      if (!full_young_gcs()) {
-        set_full_young_gcs(true);
-        ergo_verbose0(ErgoPartiallyYoungGCs,
-                      "end partially-young GCs",
+      // We do not allow mixed GCs during marking.
+      if (!gcs_are_young()) {
+        set_gcs_are_young(true);
+        ergo_verbose0(ErgoMixedGCs,
+                      "end mixed GCs",
                       ergo_format_reason("concurrent cycle is about to start"));
       }
 
@@ -2623,12 +2629,12 @@
   double young_start_time_sec = os::elapsedTime();
 
   _collection_set_bytes_used_before = 0;
-  _last_young_gc_full = full_young_gcs() ? true : false;
-
-  if (_last_young_gc_full) {
-    ++_full_young_pause_num;
+  _last_gc_was_young = gcs_are_young() ? true : false;
+
+  if (_last_gc_was_young) {
+    ++_young_pause_num;
   } else {
-    ++_partial_young_pause_num;
+    ++_mixed_pause_num;
   }
 
   // The young list is laid with the survivor regions from the previous
@@ -2675,7 +2681,7 @@
   // We are doing young collections so reset this.
   non_young_start_time_sec = young_end_time_sec;
 
-  if (!full_young_gcs()) {
+  if (!gcs_are_young()) {
     bool should_continue = true;
     NumberSeq seq;
     double avg_prediction = 100000000000000000.0; // something very large
@@ -2732,14 +2738,14 @@
     } while (should_continue);
 
     if (!adaptive_young_list_length() &&
-                             cset_region_length() < _young_list_fixed_length) {
+        cset_region_length() < _young_list_fixed_length) {
       ergo_verbose2(ErgoCSetConstruction,
-                    "request partially-young GCs end",
+                    "request mixed GCs end",
                     ergo_format_reason("CSet length lower than target")
                     ergo_format_region("CSet")
                     ergo_format_region("young target"),
                     cset_region_length(), _young_list_fixed_length);
-      _should_revert_to_full_young_gcs  = true;
+      _should_revert_to_young_gcs  = true;
     }
 
     ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
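
The g1CollectorPolicy.cpp hunks above are a systematic rename (fully-young/partially-young become young/mixed) around a small state machine: mixed GCs end when a revert was requested or when the known-garbage ratio falls below 5%. A minimal standalone model of that decision, with hypothetical names mirroring the patch (a sketch, not HotSpot code):

    // Minimal model of the renamed mode switch; hypothetical class, not
    // HotSpot code. The 0.05 threshold and field names mirror the patch.
    #include <cstdio>

    class PolicyModel {
      bool   _gcs_are_young;               // young-only vs. mixed mode
      bool   _should_revert_to_young_gcs;  // set when a mixed GC looks too expensive
      double _known_garbage_ratio;

     public:
      PolicyModel()
        : _gcs_are_young(true),
          _should_revert_to_young_gcs(false),
          _known_garbage_ratio(0.0) {}

      void set_gcs_are_young(bool young)     { _gcs_are_young = young; }
      bool gcs_are_young() const             { return _gcs_are_young; }
      void request_mixed_gcs_end()           { _should_revert_to_young_gcs = true; }
      void set_known_garbage_ratio(double r) { _known_garbage_ratio = r; }

      // End-of-pause decision: leave mixed mode on an explicit revert request
      // or when too little known garbage remains to justify another mixed GC.
      void update_after_pause() {
        if (!_gcs_are_young &&
            (_should_revert_to_young_gcs || _known_garbage_ratio < 0.05)) {
          set_gcs_are_young(true);
        }
        _should_revert_to_young_gcs = false;
      }
    };

    int main() {
      PolicyModel p;
      p.set_gcs_are_young(false);      // enter mixed mode after marking
      p.set_known_garbage_ratio(0.02); // below the 5% threshold
      p.update_after_pause();
      std::printf("gcs_are_young: %s\n", p.gcs_are_young() ? "true" : "false");
      return 0;
    }
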
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -164,8 +164,8 @@
   // times for a given worker thread.
   double* _par_last_gc_worker_other_times_ms;
 
-  // indicates whether we are in full young or partially young GC mode
-  bool _full_young_gcs;
+  // indicates whether we are in young or mixed GC mode
+  bool _gcs_are_young;
 
   // if true, then it tries to dynamically adjust the length of the
   // young list
@@ -178,10 +178,10 @@
   // locker is active. This should be >= _young_list_target_length;
   size_t _young_list_max_length;
 
-  bool   _last_young_gc_full;
+  bool                  _last_gc_was_young;
 
-  unsigned              _full_young_pause_num;
-  unsigned              _partial_young_pause_num;
+  unsigned              _young_pause_num;
+  unsigned              _mixed_pause_num;
 
   bool                  _during_marking;
   bool                  _in_marking_window;
@@ -211,10 +211,10 @@
   TruncatedSeq* _pending_card_diff_seq;
   TruncatedSeq* _rs_length_diff_seq;
   TruncatedSeq* _cost_per_card_ms_seq;
-  TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
-  TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
+  TruncatedSeq* _young_cards_per_entry_ratio_seq;
+  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
   TruncatedSeq* _cost_per_entry_ms_seq;
-  TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
+  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
   TruncatedSeq* _cost_per_byte_ms_seq;
   TruncatedSeq* _constant_other_time_ms_seq;
   TruncatedSeq* _young_other_cost_per_region_ms_seq;
@@ -322,20 +322,22 @@
 
   size_t predict_pending_card_diff() {
     double prediction = get_new_neg_prediction(_pending_card_diff_seq);
-    if (prediction < 0.00001)
+    if (prediction < 0.00001) {
       return 0;
-    else
+    } else {
       return (size_t) prediction;
+    }
   }
 
   size_t predict_pending_cards() {
     size_t max_pending_card_num = _g1->max_pending_card_num();
     size_t diff = predict_pending_card_diff();
     size_t prediction;
-    if (diff > max_pending_card_num)
+    if (diff > max_pending_card_num) {
       prediction = max_pending_card_num;
-    else
+    } else {
       prediction = max_pending_card_num - diff;
+    }
 
     return prediction;
   }
@@ -356,57 +358,62 @@
     return (double) pending_cards * predict_cost_per_card_ms();
   }
 
-  double predict_fully_young_cards_per_entry_ratio() {
-    return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
+  double predict_young_cards_per_entry_ratio() {
+    return get_new_prediction(_young_cards_per_entry_ratio_seq);
   }
 
-  double predict_partially_young_cards_per_entry_ratio() {
-    if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
-      return predict_fully_young_cards_per_entry_ratio();
-    else
-      return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
+  double predict_mixed_cards_per_entry_ratio() {
+    if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
+      return predict_young_cards_per_entry_ratio();
+    } else {
+      return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
+    }
   }
 
   size_t predict_young_card_num(size_t rs_length) {
     return (size_t) ((double) rs_length *
-                     predict_fully_young_cards_per_entry_ratio());
+                     predict_young_cards_per_entry_ratio());
   }
 
   size_t predict_non_young_card_num(size_t rs_length) {
     return (size_t) ((double) rs_length *
-                     predict_partially_young_cards_per_entry_ratio());
+                     predict_mixed_cards_per_entry_ratio());
   }
 
   double predict_rs_scan_time_ms(size_t card_num) {
-    if (full_young_gcs())
+    if (gcs_are_young()) {
       return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
-    else
-      return predict_partially_young_rs_scan_time_ms(card_num);
+    } else {
+      return predict_mixed_rs_scan_time_ms(card_num);
+    }
   }
 
-  double predict_partially_young_rs_scan_time_ms(size_t card_num) {
-    if (_partially_young_cost_per_entry_ms_seq->num() < 3)
+  double predict_mixed_rs_scan_time_ms(size_t card_num) {
+    if (_mixed_cost_per_entry_ms_seq->num() < 3) {
       return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
-    else
-      return (double) card_num *
-        get_new_prediction(_partially_young_cost_per_entry_ms_seq);
+    } else {
+      return (double) (card_num *
+                       get_new_prediction(_mixed_cost_per_entry_ms_seq));
+    }
   }
 
   double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
-    if (_cost_per_byte_ms_during_cm_seq->num() < 3)
-      return 1.1 * (double) bytes_to_copy *
-        get_new_prediction(_cost_per_byte_ms_seq);
-    else
+    if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
+      return (1.1 * (double) bytes_to_copy) *
+              get_new_prediction(_cost_per_byte_ms_seq);
+    } else {
       return (double) bytes_to_copy *
-        get_new_prediction(_cost_per_byte_ms_during_cm_seq);
+             get_new_prediction(_cost_per_byte_ms_during_cm_seq);
+    }
   }
 
   double predict_object_copy_time_ms(size_t bytes_to_copy) {
-    if (_in_marking_window && !_in_marking_window_im)
+    if (_in_marking_window && !_in_marking_window_im) {
       return predict_object_copy_time_ms_during_cm(bytes_to_copy);
-    else
+    } else {
       return (double) bytes_to_copy *
-        get_new_prediction(_cost_per_byte_ms_seq);
+              get_new_prediction(_cost_per_byte_ms_seq);
+    }
   }
 
   double predict_constant_other_time_ms() {
@@ -414,15 +421,13 @@
   }
 
   double predict_young_other_time_ms(size_t young_num) {
-    return
-      (double) young_num *
-      get_new_prediction(_young_other_cost_per_region_ms_seq);
+    return (double) young_num *
+           get_new_prediction(_young_other_cost_per_region_ms_seq);
   }
 
   double predict_non_young_other_time_ms(size_t non_young_num) {
-    return
-      (double) non_young_num *
-      get_new_prediction(_non_young_other_cost_per_region_ms_seq);
+    return (double) non_young_num *
+           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
   }
 
   void check_if_region_is_too_expensive(double predicted_time_ms);
@@ -456,7 +461,7 @@
   double predict_survivor_regions_evac_time();
 
   void cset_regions_freed() {
-    bool propagate = _last_young_gc_full && !_in_marking_window;
+    bool propagate = _last_gc_was_young && !_in_marking_window;
     _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
     _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
     // also call it on any more surv rate groups
@@ -628,8 +633,8 @@
   // initial-mark work.
   volatile bool _during_initial_mark_pause;
 
-  bool _should_revert_to_full_young_gcs;
-  bool _last_full_young_gc;
+  bool _should_revert_to_young_gcs;
+  bool _last_young_gc;
 
   // This set of variables tracks the collector efficiency, in order to
   // determine whether we should initiate a new marking.
@@ -985,11 +990,11 @@
     return _young_list_max_length;
   }
 
-  bool full_young_gcs() {
-    return _full_young_gcs;
+  bool gcs_are_young() {
+    return _gcs_are_young;
   }
-  void set_full_young_gcs(bool full_young_gcs) {
-    _full_young_gcs = full_young_gcs;
+  void set_gcs_are_young(bool gcs_are_young) {
+    _gcs_are_young = gcs_are_young;
   }
 
   bool adaptive_young_list_length() {
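
The mixed-GC predictors above share a fallback pattern: while the mixed-GC sequence has too few samples, the young-GC estimate is used instead. A sketch of the pattern with a stand-in for TruncatedSeq (plain averaging is assumed here; HotSpot's get_new_prediction() is more elaborate):

    // Fallback pattern with a stand-in Seq; sketch, not HotSpot code.
    #include <numeric>
    #include <vector>

    struct Seq {
      std::vector<double> samples;
      int num() const { return (int) samples.size(); }
      double avg() const {
        return samples.empty()
            ? 0.0
            : std::accumulate(samples.begin(), samples.end(), 0.0) / samples.size();
      }
    };

    double predict(const Seq& s) { return s.avg(); }

    double predict_mixed_cards_per_entry_ratio(const Seq& young, const Seq& mixed) {
      if (mixed.num() < 2) {   // too little mixed-GC history yet
        return predict(young); // borrow the young-GC estimate
      } else {
        return predict(mixed);
      }
    }
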
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -52,14 +52,13 @@
 const char* G1ErgoVerbose::to_string(int tag) {
   ErgoHeuristic n = extract_heuristic(tag);
   switch (n) {
-  case ErgoHeapSizing:            return "Heap Sizing";
-  case ErgoCSetConstruction:      return "CSet Construction";
-  case ErgoConcCycles:            return "Concurrent Cycles";
-  case ErgoPartiallyYoungGCs:     return "Partially-Young GCs";
+  case ErgoHeapSizing:        return "Heap Sizing";
+  case ErgoCSetConstruction:  return "CSet Construction";
+  case ErgoConcCycles:        return "Concurrent Cycles";
+  case ErgoMixedGCs:          return "Mixed GCs";
   default:
     ShouldNotReachHere();
     // Keep the Windows compiler happy
     return NULL;
   }
 }
-
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -69,7 +69,7 @@
   ErgoHeapSizing = 0,
   ErgoCSetConstruction,
   ErgoConcCycles,
-  ErgoPartiallyYoungGCs,
+  ErgoMixedGCs,
 
   ErgoHeuristicNum
 } ErgoHeuristic;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -119,7 +119,7 @@
   G1CollectedHeap* _g1h;
 
   // jstat performance counters
-  //  incremental collections both fully and partially young
+  //  incremental collections both young and mixed
   CollectorCounters*   _incremental_collection_counters;
   //  full stop-the-world collections
   CollectorCounters*   _full_collection_counters;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -672,15 +672,20 @@
 }
 
 jlong PSMarkSweep::millis_since_last_gc() {
-  jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
+  // We need a monotonically non-decreasing time in ms but
+  // os::javaTimeMillis() does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  jlong ret_val = now - _time_of_last_gc;
   // XXX See note in genCollectedHeap::millis_since_last_gc().
   if (ret_val < 0) {
-    NOT_PRODUCT(warning("time warp: %d", ret_val);)
+    NOT_PRODUCT(warning("time warp: "INT64_FORMAT, ret_val);)
     return 0;
   }
   return ret_val;
 }
 
 void PSMarkSweep::reset_millis_since_last_gc() {
-  _time_of_last_gc = os::javaTimeMillis();
+  // We need a monotonically non-decreasing time in ms but
+  // os::javaTimeMillis() does not guarantee monotonicity.
+  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 }
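
Both millis_since_last_gc() and reset_millis_since_last_gc() now derive milliseconds from os::javaTimeNanos(), which is monotonic where the platform provides such a source, and still guard the subtraction. The same pattern with std::chrono's steady_clock standing in for javaTimeNanos() (a sketch, not HotSpot code):

    // Monotonic milliseconds with a guarded subtraction; sketch only.
    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    static int64_t monotonic_millis() {
      using namespace std::chrono;
      return duration_cast<milliseconds>(
          steady_clock::now().time_since_epoch()).count();
    }

    int64_t millis_since(int64_t time_of_last_gc) {
      int64_t ret_val = monotonic_millis() - time_of_last_gc;
      if (ret_val < 0) {  // a buggy time source can still warp; clamp like the patch
        std::fprintf(stderr, "time warp: %lld\n", (long long) ret_val);
        return 0;
      }
      return ret_val;
    }
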
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -3398,17 +3398,22 @@
 }
 
 jlong PSParallelCompact::millis_since_last_gc() {
-  jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
+  // We need a monotonically non-decreasing time in ms but
+  // os::javaTimeMillis() does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  jlong ret_val = now - _time_of_last_gc;
   // XXX See note in genCollectedHeap::millis_since_last_gc().
   if (ret_val < 0) {
-    NOT_PRODUCT(warning("time warp: %d", ret_val);)
+    NOT_PRODUCT(warning("time warp: "INT64_FORMAT, ret_val);)
     return 0;
   }
   return ret_val;
 }
 
 void PSParallelCompact::reset_millis_since_last_gc() {
-  _time_of_last_gc = os::javaTimeMillis();
+  // We need a monotonically non-decreasing time in ms but
+  // os::javaTimeMillis() does not guarantee monotonicity.
+  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 }
 
 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -471,3 +471,26 @@
 
   return mirror;
 }
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+void CollectedHeap::test_is_in() {
+  CollectedHeap* heap = Universe::heap();
+
+  // Test that NULL is not in the heap.
+  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");
+
+  // Test that a pointer to before the heap start is reported as outside the heap.
+  assert(heap->_reserved.start() >= (void*)MinObjAlignment, "sanity");
+  void* before_heap = (void*)((intptr_t)heap->_reserved.start() - MinObjAlignment);
+  assert(!heap->is_in(before_heap),
+      err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", before_heap));
+
+  // Test that a pointer to after the heap end is reported as outside the heap.
+  assert(heap->_reserved.end() <= (void*)(uintptr_t(-1) - (uint)MinObjAlignment), "sanity");
+  void* after_heap = (void*)((intptr_t)heap->_reserved.end() + MinObjAlignment);
+  assert(!heap->is_in(after_heap),
+      err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", after_heap));
+}
+#endif
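
test_is_in() probes addresses one alignment unit outside the reserved range and expects is_in() to reject them. The same boundary-probing idiom against a plain buffer, with a hypothetical Range type (illustrative only):

    // Boundary probing as in test_is_in(), against a plain buffer.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Range {
      const char* _start;
      const char* _end;  // exclusive
      bool is_in(const void* p) const {
        return p >= (const void*) _start && p < (const void*) _end;
      }
    };

    int main() {
      static char block[64];
      Range r = { block, block + sizeof(block) };

      assert(!r.is_in(NULL));                               // NULL is never in
      assert(!r.is_in((void*) ((uintptr_t) r._start - 1))); // just before the start
      assert( r.is_in(r._start));                           // first byte
      assert(!r.is_in((void*) (uintptr_t) r._end));         // one past the end
      return 0;
    }
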
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -217,8 +217,8 @@
     return p == NULL || is_in_reserved(p);
   }
 
-  // Returns "TRUE" if "p" points to the head of an allocated object in the
-  // heap. Since this method can be expensive in general, we restrict its
+  // Returns "TRUE" iff "p" points into the committed areas of the heap.
+  // Since this method can be expensive in general, we restrict its
   // use to assertion checking only.
   virtual bool is_in(const void* p) const = 0;
 
@@ -648,6 +648,10 @@
   // reduce the occurrence of ParallelGCThreads to uses where the
   // actual number may be germane.
   static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
+
+  /////////////// Unit tests ///////////////
+
+  NOT_PRODUCT(static void test_is_in();)
 };
 
 // Class to set and reset the GC cause for a CollectedHeap.
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -957,7 +957,7 @@
   return result;
 }
 
-// Returns "TRUE" iff "p" points into the allocated area of the heap.
+// Returns "TRUE" iff "p" points into the committed areas of the heap.
 bool GenCollectedHeap::is_in(const void* p) const {
   #ifndef ASSERT
   guarantee(VerifyBeforeGC   ||
@@ -1460,26 +1460,22 @@
 };
 
 jlong GenCollectedHeap::millis_since_last_gc() {
-  jlong now = os::javaTimeMillis();
+  // We need a monotonically non-decreasing time in ms but
+  // os::javaTimeMillis() does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   GenTimeOfLastGCClosure tolgc_cl(now);
   // iterate over generations getting the oldest
   // time that a generation was collected
   generation_iterate(&tolgc_cl, false);
   tolgc_cl.do_generation(perm_gen());
-  // XXX Despite the assert above, since javaTimeMillis()
-  // doesnot guarantee monotonically increasing return
-  // values (note, i didn't say "strictly monotonic"),
-  // we need to guard against getting back a time
-  // later than now. This should be fixed by basing
-  // on someting like gethrtime() which guarantees
-  // monotonicity. Note that cond_wait() is susceptible
-  // to a similar problem, because its interface is
-  // based on absolute time in the form of the
-  // system time's notion of UCT. See also 4506635
-  // for yet another problem of similar nature. XXX
+
+  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
+  // provided the underlying platform provides such a time source
+  // (and it is bug free). So we still have to guard against getting
+  // back a time later than 'now'.
   jlong retVal = now - tolgc_cl.time();
   if (retVal < 0) {
-    NOT_PRODUCT(warning("time warp: %d", retVal);)
+    NOT_PRODUCT(warning("time warp: "INT64_FORMAT, retVal);)
     return 0;
   }
   return retVal;
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -198,7 +198,7 @@
   // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
   void collect(GCCause::Cause cause, int max_level);
 
-  // Returns "TRUE" iff "p" points into the allocated area of the heap.
+  // Returns "TRUE" iff "p" points into the committed areas of the heap.
   // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
   // be expensive to compute in general, so, to prevent
   // their inadvertent use in product jvm's, we restrict their use to
--- a/hotspot/src/share/vm/memory/generation.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/memory/generation.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -220,7 +220,7 @@
   MemRegion prev_used_region() const { return _prev_used_region; }
   virtual void  save_used_region()   { _prev_used_region = used_region(); }
 
-  // Returns "TRUE" iff "p" points into an allocated object in the generation.
+  // Returns "TRUE" iff "p" points into the committed areas in the generation.
   // For some kinds of generations, this may be an expensive operation.
   // To avoid performance problems stemming from its inadvertent use in
   // product jvm's, we restrict its use to assertion checking or
@@ -413,10 +413,13 @@
   // Time (in ms) when we were last collected or now if a collection is
   // in progress.
   virtual jlong time_of_last_gc(jlong now) {
-    // XXX See note in genCollectedHeap::millis_since_last_gc()
+    // Both _time_of_last_gc and now are set using a time source
+    // that guarantees monotonically non-decreasing values provided
+    // the underlying platform provides such a source. So we still
+    // have to guard against non-monotonicity.
     NOT_PRODUCT(
       if (now < _time_of_last_gc) {
-        warning("time warp: %d to %d", _time_of_last_gc, now);
+        warning("time warp: "INT64_FORMAT" to "INT64_FORMAT, _time_of_last_gc, now);
       }
     )
     return _time_of_last_gc;
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -43,7 +43,9 @@
 }
 
 void ReferenceProcessor::init_statics() {
-  jlong now = os::javaTimeMillis();
+  // We need a monotonically non-decreasing time in ms but
+  // os::javaTimeMillis() does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 
   // Initialize the soft ref timestamp clock.
   _soft_ref_timestamp_clock = now;
@@ -151,7 +153,10 @@
 void ReferenceProcessor::update_soft_ref_master_clock() {
   // Update (advance) the soft ref master clock field. This must be done
   // after processing the soft ref list.
-  jlong now = os::javaTimeMillis();
+
+  // We need a monotonically non-decreasing time in ms but
+  // os::javaTimeMillis() does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
   assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");
 
@@ -161,10 +166,11 @@
             _soft_ref_timestamp_clock, now);
   }
   )
-  // In product mode, protect ourselves from system time being adjusted
-  // externally and going backward; see note in the implementation of
-  // GenCollectedHeap::time_since_last_gc() for the right way to fix
-  // this uniformly throughout the VM; see bug-id 4741166. XXX
+  // The values of now and _soft_ref_timestamp_clock are set using
+  // javaTimeNanos(), which is guaranteed to be monotonically
+  // non-decreasing provided the underlying platform provides such
+  // a time source (and it is bug free).
+  // In product mode, however, protect ourselves from non-monotonicity.
   if (now > _soft_ref_timestamp_clock) {
     _soft_ref_timestamp_clock = now;
     java_lang_ref_SoftReference::set_clock(now);
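
Even with the monotonic source, the patch keeps the product-mode guard: the soft-ref master clock only ever moves forward. Reduced to its essentials (a sketch, not the HotSpot code path):

    // Forward-only clock update; backward steps are simply ignored.
    #include <cstdint>

    static int64_t _soft_ref_timestamp_clock = 0;

    void update_soft_ref_master_clock(int64_t now) {
      if (now > _soft_ref_timestamp_clock) {  // ignore any backward step
        _soft_ref_timestamp_clock = now;
      }
    }
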
--- a/hotspot/src/share/vm/memory/space.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/memory/space.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -304,11 +304,6 @@
   CompactibleSpace::clear(mangle_space);
 }
 
-bool Space::is_in(const void* p) const {
-  HeapWord* b = block_start_const(p);
-  return b != NULL && block_is_obj(b);
-}
-
 bool ContiguousSpace::is_in(const void* p) const {
   return _bottom <= p && p < _top;
 }
--- a/hotspot/src/share/vm/memory/space.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/memory/space.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -187,7 +187,7 @@
   // expensive operation. To prevent performance problems
   // on account of its inadvertent use in product jvm's,
   // we restrict its use to assertion checks only.
-  virtual bool is_in(const void* p) const;
+  virtual bool is_in(const void* p) const = 0;
 
   // Returns true iff the given reserved memory of the space contains the
   // given address.
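
Deleting Space::is_in()'s block-walking default and making it pure virtual forces every concrete space to provide a precise check; ContiguousSpace's is just a bounds test, as the space.cpp hunk above shows. The shape of the hierarchy after the change (a sketch, not the HotSpot classes):

    // Pure-virtual base check with a cheap contiguous implementation.
    class Space {
     public:
      virtual bool is_in(const void* p) const = 0;  // was: block-walking default
      virtual ~Space() {}
    };

    class ContiguousSpace : public Space {
      const char* _bottom;
      const char* _top;
     public:
      ContiguousSpace(const char* bottom, const char* top)
        : _bottom(bottom), _top(top) {}
      virtual bool is_in(const void* p) const {
        return (const void*) _bottom <= p && p < (const void*) _top;
      }
    };
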
--- a/hotspot/src/share/vm/oops/arrayOop.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/oops/arrayOop.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -38,9 +38,7 @@
   return (julong)(size_t)bytes == bytes;
 }
 
-bool arrayOopDesc::test_max_array_length() {
-  tty->print_cr("test_max_array_length");
-
+void arrayOopDesc::test_max_array_length() {
   assert(check_max_length_overflow(T_BOOLEAN), "size_t overflow for boolean array");
   assert(check_max_length_overflow(T_CHAR), "size_t overflow for char array");
   assert(check_max_length_overflow(T_FLOAT), "size_t overflow for float array");
@@ -54,8 +52,6 @@
   assert(check_max_length_overflow(T_NARROWOOP), "size_t overflow for narrowOop array");
 
   // T_VOID and T_ADDRESS are not supported by max_array_length()
-
-  return true;
 }
 
 
--- a/hotspot/src/share/vm/oops/arrayOop.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/oops/arrayOop.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -128,7 +128,7 @@
 #ifndef PRODUCT
   static bool check_max_length_overflow(BasicType type);
   static int32_t old_max_array_length(BasicType type);
-  static bool test_max_array_length();
+  static void test_max_array_length();
 #endif
 };
 
--- a/hotspot/src/share/vm/prims/jni.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/prims/jni.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -5037,16 +5037,25 @@
 
 #ifndef PRODUCT
 
+#include "gc_interface/collectedHeap.hpp"
 #include "utilities/quickSort.hpp"
 
+#define run_unit_test(unit_test_function_call)              \
+  tty->print_cr("Running test: " #unit_test_function_call); \
+  unit_test_function_call
+
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
-    assert(QuickSort::test_quick_sort(), "test_quick_sort failed");
-    assert(arrayOopDesc::test_max_array_length(), "test_max_array_length failed");
+    tty->print_cr("Running internal VM tests");
+    run_unit_test(arrayOopDesc::test_max_array_length());
+    run_unit_test(CollectedHeap::test_is_in());
+    run_unit_test(QuickSort::test_quick_sort());
     tty->print_cr("All internal VM tests passed");
   }
 }
 
+#undef run_unit_test
+
 #endif
 
 #ifndef USDT2
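
The run_unit_test macro relies on the preprocessor's '#' stringize operator to print the exact call before executing it. A standalone sketch of the same technique (not the HotSpot macro itself):

    // Stringize-and-run, as in the patch above; standalone sketch.
    #include <cstdio>

    #define run_unit_test(unit_test_function_call)                 \
      std::printf("Running test: %s\n", #unit_test_function_call); \
      unit_test_function_call

    static void test_example() { std::printf("  passed\n"); }

    int main() {
      run_unit_test(test_example());  // prints the call text, then runs it
      return 0;
    }

    #undef run_unit_test

Because the macro expands to two statements, it would misbehave as the unbraced body of an if; the patch sidesteps this by using it only at statement level and #undef-ing it immediately after use.
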
--- a/hotspot/src/share/vm/prims/jvm.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -3515,14 +3515,14 @@
 JVM_LEAF(jint, JVM_Recv(jint fd, char *buf, jint nBytes, jint flags))
   JVMWrapper2("JVM_Recv (0x%x)", fd);
   //%note jvm_r6
-  return os::recv(fd, buf, nBytes, flags);
+  return os::recv(fd, buf, (size_t)nBytes, (uint)flags);
 JVM_END
 
 
 JVM_LEAF(jint, JVM_Send(jint fd, char *buf, jint nBytes, jint flags))
   JVMWrapper2("JVM_Send (0x%x)", fd);
   //%note jvm_r6
-  return os::send(fd, buf, nBytes, flags);
+  return os::send(fd, buf, (size_t)nBytes, (uint)flags);
 JVM_END
 
 
@@ -3543,42 +3543,51 @@
 JVM_LEAF(jint, JVM_Connect(jint fd, struct sockaddr *him, jint len))
   JVMWrapper2("JVM_Connect (0x%x)", fd);
   //%note jvm_r6
-  return os::connect(fd, him, len);
+  return os::connect(fd, him, (socklen_t)len);
 JVM_END
 
 
 JVM_LEAF(jint, JVM_Bind(jint fd, struct sockaddr *him, jint len))
   JVMWrapper2("JVM_Bind (0x%x)", fd);
   //%note jvm_r6
-  return os::bind(fd, him, len);
+  return os::bind(fd, him, (socklen_t)len);
 JVM_END
 
 
 JVM_LEAF(jint, JVM_Accept(jint fd, struct sockaddr *him, jint *len))
   JVMWrapper2("JVM_Accept (0x%x)", fd);
   //%note jvm_r6
-  return os::accept(fd, him, (int *)len);
+  socklen_t socklen = (socklen_t)(*len);
+  jint result = os::accept(fd, him, &socklen);
+  *len = (jint)socklen;
+  return result;
 JVM_END
 
 
 JVM_LEAF(jint, JVM_RecvFrom(jint fd, char *buf, int nBytes, int flags, struct sockaddr *from, int *fromlen))
   JVMWrapper2("JVM_RecvFrom (0x%x)", fd);
   //%note jvm_r6
-  return os::recvfrom(fd, buf, nBytes, flags, from, fromlen);
+  socklen_t socklen = (socklen_t)(*fromlen);
+  jint result = os::recvfrom(fd, buf, (size_t)nBytes, (uint)flags, from, &socklen);
+  *fromlen = (int)socklen;
+  return result;
 JVM_END
 
 
 JVM_LEAF(jint, JVM_GetSockName(jint fd, struct sockaddr *him, int *len))
   JVMWrapper2("JVM_GetSockName (0x%x)", fd);
   //%note jvm_r6
-  return os::get_sock_name(fd, him, len);
+  socklen_t socklen = (socklen_t)(*len);
+  jint result = os::get_sock_name(fd, him, &socklen);
+  *len = (int)socklen;
+  return result;
 JVM_END
 
 
 JVM_LEAF(jint, JVM_SendTo(jint fd, char *buf, int len, int flags, struct sockaddr *to, int tolen))
   JVMWrapper2("JVM_SendTo (0x%x)", fd);
   //%note jvm_r6
-  return os::sendto(fd, buf, len, flags, to, tolen);
+  return os::sendto(fd, buf, (size_t)len, (uint)flags, to, (socklen_t)tolen);
 JVM_END
 
 
@@ -3592,21 +3601,26 @@
 JVM_LEAF(jint, JVM_GetSockOpt(jint fd, int level, int optname, char *optval, int *optlen))
   JVMWrapper2("JVM_GetSockOpt (0x%x)", fd);
   //%note jvm_r6
-  return os::get_sock_opt(fd, level, optname, optval, optlen);
+  socklen_t socklen = (socklen_t)(*optlen);
+  jint result = os::get_sock_opt(fd, level, optname, optval, &socklen);
+  *optlen = (int)socklen;
+  return result;
 JVM_END
 
 
 JVM_LEAF(jint, JVM_SetSockOpt(jint fd, int level, int optname, const char *optval, int optlen))
   JVMWrapper2("JVM_GetSockOpt (0x%x)", fd);
   //%note jvm_r6
-  return os::set_sock_opt(fd, level, optname, optval, optlen);
+  return os::set_sock_opt(fd, level, optname, optval, (socklen_t)optlen);
 JVM_END
 
+
 JVM_LEAF(int, JVM_GetHostName(char* name, int namelen))
   JVMWrapper("JVM_GetHostName");
   return os::get_host_name(name, namelen);
 JVM_END
 
+
 // Library support ///////////////////////////////////////////////////////////////////////////
 
 JVM_ENTRY_NO_ENV(void*, JVM_LoadLibrary(const char* name))
@@ -3647,6 +3661,7 @@
   return os::dll_lookup(handle, name);
 JVM_END
 
+
 // Floating point support ////////////////////////////////////////////////////////////////////
 
 JVM_LEAF(jboolean, JVM_IsNaN(jdouble a))
@@ -3655,7 +3670,6 @@
 JVM_END
 
 
-
 // JNI version ///////////////////////////////////////////////////////////////////////////////
 
 JVM_LEAF(jboolean, JVM_IsSupportedJNIVersion(jint version))
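
The jvm.cpp wrappers above bridge the JVM interface's jint/int lengths to the OS layer's socklen_t through a local variable, since the two types can differ in width and signedness, and accept()-style calls write the length back. The same pattern shown directly against POSIX accept() (a sketch; error handling elided):

    // jint/int <-> socklen_t bridging for an in/out length parameter.
    #include <sys/socket.h>

    int jvm_accept(int fd, struct sockaddr* him, int* len) {
      socklen_t socklen = (socklen_t) (*len);   // convert on the way in
      int result = accept(fd, him, &socklen);   // kernel may rewrite socklen
      *len = (int) socklen;                     // convert back on the way out
      return result;
    }
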
--- a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -43,7 +43,7 @@
 #ifdef TARGET_ARCH_ppc
 # include "bytes_ppc.hpp"
 #endif
-// FIXME: add Deprecated, LVT, LVTT attributes
+// FIXME: add Deprecated, LVTT attributes
 // FIXME: fix Synthetic attribute
 // FIXME: per Serguei, add error return handling for constantPoolOopDesc::copy_cpool_bytes()
 
@@ -136,8 +136,9 @@
   constMethodHandle const_method(thread(), method->constMethod());
   u2 line_num_cnt = 0;
   int stackmap_len = 0;
+  int local_variable_table_length = 0;
 
-  // compute number and length of attributes -- FIXME: for now no LVT
+  // compute number and length of attributes
   int attr_count = 0;
   int attr_size = 0;
   if (const_method->has_linenumber_table()) {
@@ -170,6 +171,25 @@
       attr_size += 2 + 4 + stackmap_len;
     }
   }
+  if (method->has_localvariable_table()) {
+    local_variable_table_length = method->localvariable_table_length();
+    ++attr_count;
+    if (local_variable_table_length != 0) {
+      // Compute the size of the local variable table attribute (VM stores raw):
+      // LocalVariableTable_attribute {
+      //   u2 attribute_name_index;
+      //   u4 attribute_length;
+      //   u2 local_variable_table_length;
+      //   {
+      //     u2 start_pc;
+      //     u2 length;
+      //     u2 name_index;
+      //     u2 descriptor_index;
+      //     u2 index;
+      //   }
+      attr_size += 2 + 4 + 2 + local_variable_table_length * (2 + 2 + 2 + 2 + 2);
+    }
+  }
 
   typeArrayHandle exception_table(thread(), const_method->exception_table());
   int exception_table_length = exception_table->length();
@@ -203,8 +223,9 @@
   if (stackmap_len != 0) {
     write_stackmap_table_attribute(method, stackmap_len);
   }
-
-  // FIXME: write LVT attribute
+  if (local_variable_table_length != 0) {
+    write_local_variable_table_attribute(method, local_variable_table_length);
+  }
 }
 
 // Write Exceptions attribute
@@ -371,6 +392,36 @@
   }
 }
 
+// Write LocalVariableTable attribute
+// JVMSpec|   LocalVariableTable_attribute {
+// JVMSpec|     u2 attribute_name_index;
+// JVMSpec|     u4 attribute_length;
+// JVMSpec|     u2 local_variable_table_length;
+// JVMSpec|     {  u2 start_pc;
+// JVMSpec|       u2 length;
+// JVMSpec|       u2 name_index;
+// JVMSpec|       u2 descriptor_index;
+// JVMSpec|       u2 index;
+// JVMSpec|     } local_variable_table[local_variable_table_length];
+// JVMSpec|   }
+void JvmtiClassFileReconstituter::write_local_variable_table_attribute(methodHandle method, u2 num_entries) {
+    write_attribute_name_index("LocalVariableTable");
+    write_u4(2 + num_entries * (2 + 2 + 2 + 2 + 2));
+    write_u2(num_entries);
+
+    assert(method->localvariable_table_length() == num_entries, "just checking");
+
+    LocalVariableTableElement *elem = method->localvariable_table_start();
+    for (int j=0; j<method->localvariable_table_length(); j++) {
+      write_u2(elem->start_bci);
+      write_u2(elem->length);
+      write_u2(elem->name_cp_index);
+      write_u2(elem->descriptor_cp_index);
+      write_u2(elem->slot);
+      elem++;
+    }
+}
+
 // Write stack map table attribute
 // JSR-202|   StackMapTable_attribute {
 // JSR-202|     u2 attribute_name_index;
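
Two sizes appear in the LocalVariableTable code above: the attribute_length written by write_u4() excludes the six bytes of attribute_name_index (u2) and attribute_length (u4) themselves, while attr_size, the buffer accounting, includes them. Hypothetical helpers making the arithmetic explicit (a sketch; constants come from the layout above):

    // Attribute size arithmetic for LocalVariableTable_attribute.
    #include <cstdio>

    // Each entry: start_pc, length, name_index, descriptor_index, index (5 x u2).
    const unsigned kEntryBytes = 2 + 2 + 2 + 2 + 2;  // 10

    // What write_u4() emits: excludes the attribute's own name/length header.
    unsigned attribute_length(unsigned num_entries) {
      return 2 + num_entries * kEntryBytes;          // u2 table_length + entries
    }

    // What attr_size accumulates: includes the u2 name index and u4 length.
    unsigned total_attribute_size(unsigned num_entries) {
      return 2 + 4 + attribute_length(num_entries);
    }

    int main() {
      std::printf("attribute_length(3)     = %u\n", attribute_length(3));      // 32
      std::printf("total_attribute_size(3) = %u\n", total_attribute_size(3));  // 38
      return 0;
    }
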
--- a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -119,6 +119,7 @@
   void write_source_debug_extension_attribute();
   u2 line_number_table_entries(methodHandle method);
   void write_line_number_table_attribute(methodHandle method, u2 num_entries);
+  void write_local_variable_table_attribute(methodHandle method, u2 num_entries);
   void write_stackmap_table_attribute(methodHandle method, int stackmap_table_len);
   u2 inner_classes_attribute_length();
   void write_inner_classes_attribute(int length);
--- a/hotspot/src/share/vm/runtime/globals.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -82,16 +82,19 @@
 }
 
 bool Flag::is_writeable() const {
-  return (strcmp(kind, "{manageable}") == 0 || strcmp(kind, "{product rw}") == 0);
+  return strcmp(kind, "{manageable}") == 0 ||
+         strcmp(kind, "{product rw}") == 0 ||
+         is_writeable_ext();
 }
 
-// All flags except "manageable" are assumed internal flags.
+// All flags except "manageable" are assumed to be internal flags.
 // Long term, we need to define a mechanism to specify which flags
 // are external/stable and change this function accordingly.
 bool Flag::is_external() const {
-  return (strcmp(kind, "{manageable}") == 0);
+  return strcmp(kind, "{manageable}") == 0 || is_external_ext();
 }
 
+
 // Length of format string (e.g. "%.1234s") for printing ccstr below
 #define FORMAT_BUFFER_LEN 16
 
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -245,6 +245,8 @@
 
   bool is_unlocker_ext() const;
   bool is_unlocked_ext() const;
+  bool is_writeable_ext() const;
+  bool is_external_ext() const;
 
   void print_on(outputStream* st, bool withComments = false );
   void print_as_flag(outputStream* st);
--- a/hotspot/src/share/vm/runtime/globals_ext.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/runtime/globals_ext.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -53,4 +53,12 @@
   return true;
 }
 
+inline bool Flag::is_writeable_ext() const {
+  return false;
+}
+
+inline bool Flag::is_external_ext() const {
+  return false;
+}
+
 #endif // SHARE_VM_RUNTIME_GLOBALS_EXT_HPP
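
is_writeable_ext() and is_external_ext() follow the existing globals_ext.hpp convention: the core query ORs in a hook that the default header defines as false, so behavior changes only for builds that substitute their own extension header. The pattern reduced to essentials (a sketch, not the HotSpot Flag class):

    // Extension-hook pattern: default hook is inert.
    #include <cstring>

    struct Flag {
      const char* kind;

      // Default hook: no effect. An alternate globals_ext.hpp can widen it.
      bool is_writeable_ext() const { return false; }

      bool is_writeable() const {
        return std::strcmp(kind, "{manageable}") == 0 ||
               std::strcmp(kind, "{product rw}") == 0 ||
               is_writeable_ext();                    // extension point
      }
    };
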
--- a/hotspot/src/share/vm/runtime/os.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/runtime/os.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -584,28 +584,28 @@
   static int socket(int domain, int type, int protocol);
   static int socket_close(int fd);
   static int socket_shutdown(int fd, int howto);
-  static int recv(int fd, char *buf, int nBytes, int flags);
-  static int send(int fd, char *buf, int nBytes, int flags);
-  static int raw_send(int fd, char *buf, int nBytes, int flags);
+  static int recv(int fd, char* buf, size_t nBytes, uint flags);
+  static int send(int fd, char* buf, size_t nBytes, uint flags);
+  static int raw_send(int fd, char* buf, size_t nBytes, uint flags);
   static int timeout(int fd, long timeout);
   static int listen(int fd, int count);
-  static int connect(int fd, struct sockaddr *him, int len);
-  static int bind(int fd, struct sockaddr *him, int len);
-  static int accept(int fd, struct sockaddr *him, int *len);
-  static int recvfrom(int fd, char *buf, int nbytes, int flags,
-                             struct sockaddr *from, int *fromlen);
-  static int get_sock_name(int fd, struct sockaddr *him, int *len);
-  static int sendto(int fd, char *buf, int len, int flags,
-                           struct sockaddr *to, int tolen);
-  static int socket_available(int fd, jint *pbytes);
+  static int connect(int fd, struct sockaddr* him, socklen_t len);
+  static int bind(int fd, struct sockaddr* him, socklen_t len);
+  static int accept(int fd, struct sockaddr* him, socklen_t* len);
+  static int recvfrom(int fd, char* buf, size_t nbytes, uint flags,
+                      struct sockaddr* from, socklen_t* fromlen);
+  static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len);
+  static int sendto(int fd, char* buf, size_t len, uint flags,
+                    struct sockaddr* to, socklen_t tolen);
+  static int socket_available(int fd, jint* pbytes);
 
   static int get_sock_opt(int fd, int level, int optname,
-                           char *optval, int* optlen);
+                          char* optval, socklen_t* optlen);
   static int set_sock_opt(int fd, int level, int optname,
-                           const char *optval, int optlen);
+                          const char* optval, socklen_t optlen);
   static int get_host_name(char* name, int namelen);
 
-  static struct hostent*  get_host_by_name(char* name);
+  static struct hostent* get_host_by_name(char* name);
 
   // Printing 64 bit integers
   static const char* jlong_format_specifier();
@@ -715,7 +715,6 @@
 # include "os_bsd_zero.hpp"
 #endif
 
-
   // debugging support (mostly used by debug.cpp but also fatal error handler)
   static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address
 
--- a/hotspot/src/share/vm/services/management.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/services/management.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -33,6 +33,7 @@
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/globals.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/javaCalls.hpp"
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -175,6 +175,9 @@
 const int MICROUNITS    = 1000000;      // micro units per base unit
 const int NANOUNITS     = 1000000000;   // nano units per base unit
 
+const jlong NANOSECS_PER_SEC      = CONST64(1000000000);
+const jint  NANOSECS_PER_MILLISEC = 1000000;
+
 inline const char* proper_unit_for_byte_size(size_t s) {
   if (s >= 10*M) {
     return "M";
--- a/hotspot/src/share/vm/utilities/ostream.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/utilities/ostream.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -1021,7 +1021,7 @@
 
 void networkStream::flush() {
   if (size() != 0) {
-    int result = os::raw_send(_socket, (char *)base(), (int)size(), 0);
+    int result = os::raw_send(_socket, (char *)base(), size(), 0);
     assert(result != -1, "connection error");
     assert(result == (int)size(), "didn't send enough data");
   }
--- a/hotspot/src/share/vm/utilities/quickSort.cpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/utilities/quickSort.cpp	Fri Dec 23 15:24:43 2011 -0800
@@ -93,8 +93,7 @@
   return compare_arrays(arrayToSort, expectedResult, length);
 }
 
-bool QuickSort::test_quick_sort() {
-  tty->print_cr("test_quick_sort");
+void QuickSort::test_quick_sort() {
   {
     int* test_array = NULL;
     int* expected_array = NULL;
@@ -214,7 +213,6 @@
     delete[] test_array;
     delete[] expected_array;
   }
-  return true;
 }
 
 #endif
--- a/hotspot/src/share/vm/utilities/quickSort.hpp	Thu Dec 22 19:00:20 2011 -0800
+++ b/hotspot/src/share/vm/utilities/quickSort.hpp	Fri Dec 23 15:24:43 2011 -0800
@@ -130,7 +130,7 @@
   static void print_array(const char* prefix, int* array, int length);
   static bool compare_arrays(int* actual, int* expected, int length);
   template <class C> static bool sort_and_compare(int* arrayToSort, int* expectedResult, int length, C comparator, bool idempotent = false);
-  static bool test_quick_sort();
+  static void test_quick_sort();
 #endif
 };